[llvm] 8663926 - [Mips] Convert some tests to opaque pointers (NFC)

Nikita Popov via llvm-commits llvm-commits@lists.llvm.org
Mon Dec 19 03:56:34 PST 2022


Author: Nikita Popov
Date: 2022-12-19T12:56:12+01:00
New Revision: 8663926a544602932d299dda435ed1ef70a05f48

URL: https://github.com/llvm/llvm-project/commit/8663926a544602932d299dda435ed1ef70a05f48
DIFF: https://github.com/llvm/llvm-project/commit/8663926a544602932d299dda435ed1ef70a05f48.diff

LOG: [Mips] Convert some tests to opaque pointers (NFC)
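
The conversion is mechanical and leaves test behaviour unchanged: every typed
pointer type (i8*, i32*, %struct.x*, ...) becomes the opaque ptr type, and
zero-index getelementptr constant expressions over globals fold to the bare
global. A minimal before/after sketch of the pattern, using LLVM IR lines
taken from the hunks below:

Before (typed pointers):

    %0 = load i32, i32* @gi1, align 4
    ret i8* getelementptr ([10 x i8], [10 x i8]* @.str, i32 0, i32 0)

After (opaque pointers):

    %0 = load i32, ptr @gi1, align 4
    ret ptr @.str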

Added: 
    

Modified: 
    llvm/test/CodeGen/Mips/2008-07-03-SRet.ll
    llvm/test/CodeGen/Mips/2008-07-15-InternalConstant.ll
    llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll
    llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll
    llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll
    llvm/test/CodeGen/Mips/2008-08-06-Alloca.ll
    llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll
    llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll
    llvm/test/CodeGen/Mips/2010-07-20-Switch.ll
    llvm/test/CodeGen/Mips/2011-05-26-BranchKillsVreg.ll
    llvm/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll
    llvm/test/CodeGen/Mips/Fast-ISel/br1.ll
    llvm/test/CodeGen/Mips/Fast-ISel/bswap1.ll
    llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll
    llvm/test/CodeGen/Mips/Fast-ISel/constexpr-address.ll
    llvm/test/CodeGen/Mips/Fast-ISel/div1.ll
    llvm/test/CodeGen/Mips/Fast-ISel/double-arg.ll
    llvm/test/CodeGen/Mips/Fast-ISel/fast-isel-softfloat-lower-args.ll
    llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
    llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll
    llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll
    llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll
    llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll
    llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll
    llvm/test/CodeGen/Mips/Fast-ISel/icmpbr1.ll
    llvm/test/CodeGen/Mips/Fast-ISel/icmpi1.ll
    llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
    llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
    llvm/test/CodeGen/Mips/Fast-ISel/loadstrconst.ll
    llvm/test/CodeGen/Mips/Fast-ISel/logopm.ll
    llvm/test/CodeGen/Mips/Fast-ISel/memtest1.ll
    llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll
    llvm/test/CodeGen/Mips/Fast-ISel/pr40325.ll
    llvm/test/CodeGen/Mips/Fast-ISel/rem1.ll
    llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll
    llvm/test/CodeGen/Mips/Fast-ISel/shftopm.ll
    llvm/test/CodeGen/Mips/Fast-ISel/shift.ll
    llvm/test/CodeGen/Mips/Fast-ISel/simplestore.ll
    llvm/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
    llvm/test/CodeGen/Mips/Fast-ISel/simplestorei.ll
    llvm/test/CodeGen/Mips/Fast-ISel/stackloadstore.ll
    llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll
    llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll
    llvm/test/CodeGen/Mips/GlobalISel/irtranslator/pointers.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec_builtin.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/aggregate_struct_return.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/brindirect.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/call.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/dyn_stackalloc.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec_builtin.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fence.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations_builtin.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec_builtin.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address_pic.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/icmp.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/inttoptr_and_ptrtoint.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_4_unaligned.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_atomic.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_split_because_of_memsize_or_align.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_fold.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_vec.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec_builtin.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/pointers.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec_builtin.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sret_pointer.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_4_unaligned.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_split_because_of_memsize_or_align.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec_builtin.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/truncStore_and_aExtLoad.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/var_arg.ll
    llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/zextLoad_and_sextLoad.ll
    llvm/test/CodeGen/Mips/addc.ll
    llvm/test/CodeGen/Mips/addi.ll
    llvm/test/CodeGen/Mips/address-selection.ll
    llvm/test/CodeGen/Mips/addressing-mode.ll
    llvm/test/CodeGen/Mips/adjust-callstack-sp.ll
    llvm/test/CodeGen/Mips/align16.ll
    llvm/test/CodeGen/Mips/alloca.ll
    llvm/test/CodeGen/Mips/alloca16.ll
    llvm/test/CodeGen/Mips/and1.ll
    llvm/test/CodeGen/Mips/atomic-min-max-64.ll
    llvm/test/CodeGen/Mips/atomic-min-max.ll
    llvm/test/CodeGen/Mips/atomic.ll
    llvm/test/CodeGen/Mips/atomic64.ll
    llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll
    llvm/test/CodeGen/Mips/atomicops.ll
    llvm/test/CodeGen/Mips/beqzc.ll
    llvm/test/CodeGen/Mips/beqzc1.ll
    llvm/test/CodeGen/Mips/biggot.ll
    llvm/test/CodeGen/Mips/blockaddr.ll
    llvm/test/CodeGen/Mips/branch-relaxation-with-hazard.ll
    llvm/test/CodeGen/Mips/brconeq.ll
    llvm/test/CodeGen/Mips/brconeqk.ll
    llvm/test/CodeGen/Mips/brconeqz.ll
    llvm/test/CodeGen/Mips/brconge.ll
    llvm/test/CodeGen/Mips/brcongt.ll
    llvm/test/CodeGen/Mips/brconle.ll
    llvm/test/CodeGen/Mips/brconlt.ll
    llvm/test/CodeGen/Mips/brconne.ll
    llvm/test/CodeGen/Mips/brconnek.ll
    llvm/test/CodeGen/Mips/brconnez.ll
    llvm/test/CodeGen/Mips/brdelayslot.ll
    llvm/test/CodeGen/Mips/brind-tailcall.ll
    llvm/test/CodeGen/Mips/brind.ll
    llvm/test/CodeGen/Mips/brundef.ll
    llvm/test/CodeGen/Mips/buildpairextractelementf64.ll
    llvm/test/CodeGen/Mips/cache-intrinsic.ll
    llvm/test/CodeGen/Mips/call-optimization.ll
    llvm/test/CodeGen/Mips/cconv/arguments-float.ll
    llvm/test/CodeGen/Mips/cconv/arguments-fp128.ll
    llvm/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
    llvm/test/CodeGen/Mips/cconv/arguments-hard-float.ll
    llvm/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
    llvm/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll
    llvm/test/CodeGen/Mips/cconv/arguments-struct.ll
    llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
    llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
    llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
    llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll
    llvm/test/CodeGen/Mips/cconv/arguments.ll
    llvm/test/CodeGen/Mips/cconv/byval.ll
    llvm/test/CodeGen/Mips/cconv/memory-layout.ll
    llvm/test/CodeGen/Mips/cconv/return-float.ll
    llvm/test/CodeGen/Mips/cconv/return-hard-float.ll
    llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll
    llvm/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
    llvm/test/CodeGen/Mips/cconv/return.ll
    llvm/test/CodeGen/Mips/cconv/roundl-call.ll
    llvm/test/CodeGen/Mips/cconv/vector.ll
    llvm/test/CodeGen/Mips/cfi_offset.ll
    llvm/test/CodeGen/Mips/ci2.ll
    llvm/test/CodeGen/Mips/cmov.ll
    llvm/test/CodeGen/Mips/cmplarge.ll
    llvm/test/CodeGen/Mips/compactbranches/beqc-bnec-register-constraint.ll
    llvm/test/CodeGen/Mips/compactbranches/compact-branches-64.ll
    llvm/test/CodeGen/Mips/compactbranches/compact-branches.ll
    llvm/test/CodeGen/Mips/compactbranches/unsafe-in-forbidden-slot.ll
    llvm/test/CodeGen/Mips/const1.ll
    llvm/test/CodeGen/Mips/const4a.ll
    llvm/test/CodeGen/Mips/const6.ll
    llvm/test/CodeGen/Mips/const6a.ll
    llvm/test/CodeGen/Mips/constraint-c-err.ll
    llvm/test/CodeGen/Mips/constraint-c.ll
    llvm/test/CodeGen/Mips/constraint-empty.ll
    llvm/test/CodeGen/Mips/cprestore.ll
    llvm/test/CodeGen/Mips/cstmaterialization/stack.ll
    llvm/test/CodeGen/Mips/ctlz.ll
    llvm/test/CodeGen/Mips/dagcombine-store-gep-chain-slow.ll
    llvm/test/CodeGen/Mips/delay-slot-fill-forward.ll
    llvm/test/CodeGen/Mips/dins.ll
    llvm/test/CodeGen/Mips/disable-tail-merge.ll
    llvm/test/CodeGen/Mips/div.ll
    llvm/test/CodeGen/Mips/div_rem.ll
    llvm/test/CodeGen/Mips/divrem.ll
    llvm/test/CodeGen/Mips/divu.ll
    llvm/test/CodeGen/Mips/divu_remu.ll
    llvm/test/CodeGen/Mips/dsp-patterns.ll
    llvm/test/CodeGen/Mips/dsp-r1.ll
    llvm/test/CodeGen/Mips/dsp-vec-load-store.ll
    llvm/test/CodeGen/Mips/dynamic-stack-realignment.ll
    llvm/test/CodeGen/Mips/eh-dwarf-cfa.ll
    llvm/test/CodeGen/Mips/eh-return32.ll
    llvm/test/CodeGen/Mips/eh-return64.ll
    llvm/test/CodeGen/Mips/eh.ll
    llvm/test/CodeGen/Mips/ehframe-indirect.ll
    llvm/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll
    llvm/test/CodeGen/Mips/emit-big-cst.ll
    llvm/test/CodeGen/Mips/emutls_generic.ll
    llvm/test/CodeGen/Mips/ex2.ll
    llvm/test/CodeGen/Mips/extins.ll
    llvm/test/CodeGen/Mips/f16abs.ll
    llvm/test/CodeGen/Mips/f32-to-i64-single-float.ll
    llvm/test/CodeGen/Mips/fastcc.ll
    llvm/test/CodeGen/Mips/fastcc_byval.ll
    llvm/test/CodeGen/Mips/fixdfsf.ll
    llvm/test/CodeGen/Mips/fp-contract.ll
    llvm/test/CodeGen/Mips/fp-indexed-ls.ll
    llvm/test/CodeGen/Mips/fp-spill-reload.ll
    llvm/test/CodeGen/Mips/fp16-promote.ll
    llvm/test/CodeGen/Mips/fp16instrinsmc.ll
    llvm/test/CodeGen/Mips/fp16static.ll
    llvm/test/CodeGen/Mips/fpneeded.ll
    llvm/test/CodeGen/Mips/fpnotneeded.ll
    llvm/test/CodeGen/Mips/frame-address-err.ll
    llvm/test/CodeGen/Mips/frame-address.ll
    llvm/test/CodeGen/Mips/frameindex.ll
    llvm/test/CodeGen/Mips/global-address.ll
    llvm/test/CodeGen/Mips/global-pointer-reg.ll
    llvm/test/CodeGen/Mips/gpopt-explict-section.ll
    llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll
    llvm/test/CodeGen/Mips/gprestore.ll
    llvm/test/CodeGen/Mips/helloworld.ll
    llvm/test/CodeGen/Mips/hf16_1.ll
    llvm/test/CodeGen/Mips/hf16call32.ll
    llvm/test/CodeGen/Mips/hf16call32_body.ll
    llvm/test/CodeGen/Mips/hf1_body.ll
    llvm/test/CodeGen/Mips/hfptrcall.ll
    llvm/test/CodeGen/Mips/i32k.ll
    llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll
    llvm/test/CodeGen/Mips/indirect-jump-hazard/jumptables.ll
    llvm/test/CodeGen/Mips/indirect-jump-hazard/long-branch.ll
    llvm/test/CodeGen/Mips/indirect-jump-hazard/long-calls.ll
    llvm/test/CodeGen/Mips/indirectcall.ll
    llvm/test/CodeGen/Mips/init-array.ll
    llvm/test/CodeGen/Mips/inlineasm-assembler-directives.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-r-i1.ll
    llvm/test/CodeGen/Mips/inlineasm-constraint-reg.ll
    llvm/test/CodeGen/Mips/inlineasm-operand-code.ll
    llvm/test/CodeGen/Mips/inlineasm-output-template.ll
    llvm/test/CodeGen/Mips/insn-zero-size-bb.ll
    llvm/test/CodeGen/Mips/int-to-float-conversion.ll
    llvm/test/CodeGen/Mips/internalfunc.ll
    llvm/test/CodeGen/Mips/interrupt-attr.ll
    llvm/test/CodeGen/Mips/jtstat.ll
    llvm/test/CodeGen/Mips/jumptable_labels.ll
    llvm/test/CodeGen/Mips/l3mc.ll
    llvm/test/CodeGen/Mips/largeimm1.ll
    llvm/test/CodeGen/Mips/largeimmprinting.ll
    llvm/test/CodeGen/Mips/lb1.ll
    llvm/test/CodeGen/Mips/lbu1.ll
    llvm/test/CodeGen/Mips/lcb2.ll
    llvm/test/CodeGen/Mips/lcb3c.ll
    llvm/test/CodeGen/Mips/lcb4a.ll
    llvm/test/CodeGen/Mips/lcb5.ll
    llvm/test/CodeGen/Mips/lh1.ll
    llvm/test/CodeGen/Mips/lhu1.ll
    llvm/test/CodeGen/Mips/llcarry.ll
    llvm/test/CodeGen/Mips/llvm-ir/addrspacecast.ll
    llvm/test/CodeGen/Mips/llvm-ir/atomicrmx.ll
    llvm/test/CodeGen/Mips/llvm-ir/call.ll
    llvm/test/CodeGen/Mips/llvm-ir/indirectbr.ll
    llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll
    llvm/test/CodeGen/Mips/llvm-ir/load-atomic.ll
    llvm/test/CodeGen/Mips/llvm-ir/load.ll
    llvm/test/CodeGen/Mips/llvm-ir/select-int.ll
    llvm/test/CodeGen/Mips/llvm-ir/store-atomic.ll
    llvm/test/CodeGen/Mips/llvm-ir/store.ll
    llvm/test/CodeGen/Mips/long-calls.ll
    llvm/test/CodeGen/Mips/longbranch.ll
    llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-1.ll
    llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-2.ll
    llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-3.ll
    llvm/test/CodeGen/Mips/lw16-base-reg.ll
    llvm/test/CodeGen/Mips/machineverifier.ll
    llvm/test/CodeGen/Mips/mbrsize4a.ll
    llvm/test/CodeGen/Mips/memcpy.ll
    llvm/test/CodeGen/Mips/micromips-addiu.ll
    llvm/test/CodeGen/Mips/micromips-addu16.ll
    llvm/test/CodeGen/Mips/micromips-and16.ll
    llvm/test/CodeGen/Mips/micromips-andi.ll
    llvm/test/CodeGen/Mips/micromips-atomic.ll
    llvm/test/CodeGen/Mips/micromips-atomic1.ll
    llvm/test/CodeGen/Mips/micromips-b-range.ll
    llvm/test/CodeGen/Mips/micromips-compact-branches.ll
    llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll
    llvm/test/CodeGen/Mips/micromips-delay-slot.ll
    llvm/test/CodeGen/Mips/micromips-gcc-except-table.ll
    llvm/test/CodeGen/Mips/micromips-gp-rc.ll
    llvm/test/CodeGen/Mips/micromips-jal.ll
    llvm/test/CodeGen/Mips/micromips-li.ll
    llvm/test/CodeGen/Mips/micromips-load-effective-address.ll
    llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll
    llvm/test/CodeGen/Mips/micromips-not16.ll
    llvm/test/CodeGen/Mips/micromips-rdhwr-directives.ll
    llvm/test/CodeGen/Mips/micromips-shift.ll
    llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-addiur1sp-addiusp.ll
    llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lbu16-lhu16-sb16-sh16.ll
    llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.ll
    llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwsp-swsp.ll
    llvm/test/CodeGen/Mips/micromips-subu16.ll
    llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll
    llvm/test/CodeGen/Mips/micromips-sw.ll
    llvm/test/CodeGen/Mips/micromips-target-external-symbol-reloc.ll
    llvm/test/CodeGen/Mips/micromips-xor16.ll
    llvm/test/CodeGen/Mips/mips1-load-delay.ll
    llvm/test/CodeGen/Mips/mips16_32_8.ll
    llvm/test/CodeGen/Mips/mips16_fpret.ll
    llvm/test/CodeGen/Mips/mips16ex.ll
    llvm/test/CodeGen/Mips/mips16fpe.ll
    llvm/test/CodeGen/Mips/mips3-spill-slot.ll
    llvm/test/CodeGen/Mips/mips64-f128-call.ll
    llvm/test/CodeGen/Mips/mips64-f128.ll
    llvm/test/CodeGen/Mips/mips64-sret.ll
    llvm/test/CodeGen/Mips/mips64directive.ll
    llvm/test/CodeGen/Mips/mips64fpldst.ll
    llvm/test/CodeGen/Mips/mips64instrs.ll
    llvm/test/CodeGen/Mips/mips64intldst.ll
    llvm/test/CodeGen/Mips/mips64lea.ll
    llvm/test/CodeGen/Mips/mips64signextendsesf.ll
    llvm/test/CodeGen/Mips/mips64sinttofpsf.ll
    llvm/test/CodeGen/Mips/mipslopat.ll
    llvm/test/CodeGen/Mips/misha.ll
    llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll
    llvm/test/CodeGen/Mips/msa/2r.ll
    llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll
    llvm/test/CodeGen/Mips/msa/2rf.ll
    llvm/test/CodeGen/Mips/msa/2rf_exup.ll
    llvm/test/CodeGen/Mips/msa/2rf_float_int.ll
    llvm/test/CodeGen/Mips/msa/2rf_fq.ll
    llvm/test/CodeGen/Mips/msa/2rf_int_float.ll
    llvm/test/CodeGen/Mips/msa/2rf_tq.ll
    llvm/test/CodeGen/Mips/msa/3r-a.ll
    llvm/test/CodeGen/Mips/msa/3r-b.ll
    llvm/test/CodeGen/Mips/msa/3r-c.ll
    llvm/test/CodeGen/Mips/msa/3r-d.ll
    llvm/test/CodeGen/Mips/msa/3r-i.ll
    llvm/test/CodeGen/Mips/msa/3r-m.ll
    llvm/test/CodeGen/Mips/msa/3r-p.ll
    llvm/test/CodeGen/Mips/msa/3r-s.ll
    llvm/test/CodeGen/Mips/msa/3r-v.ll
    llvm/test/CodeGen/Mips/msa/3r_4r.ll
    llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll
    llvm/test/CodeGen/Mips/msa/3r_splat.ll
    llvm/test/CodeGen/Mips/msa/3rf.ll
    llvm/test/CodeGen/Mips/msa/3rf_4rf.ll
    llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll
    llvm/test/CodeGen/Mips/msa/3rf_exdo.ll
    llvm/test/CodeGen/Mips/msa/3rf_float_int.ll
    llvm/test/CodeGen/Mips/msa/3rf_int_float.ll
    llvm/test/CodeGen/Mips/msa/3rf_q.ll
    llvm/test/CodeGen/Mips/msa/arithmetic.ll
    llvm/test/CodeGen/Mips/msa/arithmetic_float.ll
    llvm/test/CodeGen/Mips/msa/avoid_vector_shift_combines.ll
    llvm/test/CodeGen/Mips/msa/basic_operations.ll
    llvm/test/CodeGen/Mips/msa/basic_operations_float.ll
    llvm/test/CodeGen/Mips/msa/bit.ll
    llvm/test/CodeGen/Mips/msa/bitcast.ll
    llvm/test/CodeGen/Mips/msa/bitwise.ll
    llvm/test/CodeGen/Mips/msa/bmzi_bmnzi.ll
    llvm/test/CodeGen/Mips/msa/compare.ll
    llvm/test/CodeGen/Mips/msa/compare_float.ll
    llvm/test/CodeGen/Mips/msa/elm_copy.ll
    llvm/test/CodeGen/Mips/msa/elm_insv.ll
    llvm/test/CodeGen/Mips/msa/elm_move.ll
    llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll
    llvm/test/CodeGen/Mips/msa/endian.ll
    llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
    llvm/test/CodeGen/Mips/msa/fexuprl.ll
    llvm/test/CodeGen/Mips/msa/frameindex.ll
    llvm/test/CodeGen/Mips/msa/i10.ll
    llvm/test/CodeGen/Mips/msa/i5-a.ll
    llvm/test/CodeGen/Mips/msa/i5-b.ll
    llvm/test/CodeGen/Mips/msa/i5-c.ll
    llvm/test/CodeGen/Mips/msa/i5-m.ll
    llvm/test/CodeGen/Mips/msa/i5-s.ll
    llvm/test/CodeGen/Mips/msa/i5_ld_st.ll
    llvm/test/CodeGen/Mips/msa/i8.ll
    llvm/test/CodeGen/Mips/msa/immediates-bad.ll
    llvm/test/CodeGen/Mips/msa/immediates.ll
    llvm/test/CodeGen/Mips/msa/inline-asm.ll
    llvm/test/CodeGen/Mips/msa/ldr_str.ll
    llvm/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
    llvm/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
    llvm/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
    llvm/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
    llvm/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
    llvm/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
    llvm/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
    llvm/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
    llvm/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll
    llvm/test/CodeGen/Mips/msa/msa-nooddspreg.ll
    llvm/test/CodeGen/Mips/msa/shift-dagcombine.ll
    llvm/test/CodeGen/Mips/msa/shift_constant_pool.ll
    llvm/test/CodeGen/Mips/msa/shift_no_and.ll
    llvm/test/CodeGen/Mips/msa/shuffle.ll
    llvm/test/CodeGen/Mips/msa/spill.ll
    llvm/test/CodeGen/Mips/msa/vec.ll
    llvm/test/CodeGen/Mips/msa/vecs10.ll
    llvm/test/CodeGen/Mips/mul.ll
    llvm/test/CodeGen/Mips/mulll.ll
    llvm/test/CodeGen/Mips/nacl-align.ll
    llvm/test/CodeGen/Mips/nacl-branch-delay.ll
    llvm/test/CodeGen/Mips/nacl-reserved-regs.ll
    llvm/test/CodeGen/Mips/neg1.ll
    llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
    llvm/test/CodeGen/Mips/nomips16.ll
    llvm/test/CodeGen/Mips/not1.ll
    llvm/test/CodeGen/Mips/o32_cc_byval.ll
    llvm/test/CodeGen/Mips/o32_cc_vararg.ll
    llvm/test/CodeGen/Mips/octeon.ll
    llvm/test/CodeGen/Mips/optimize-pic-o0.ll
    llvm/test/CodeGen/Mips/or1.ll
    llvm/test/CodeGen/Mips/overflow-intrinsic-optimizations.ll
    llvm/test/CodeGen/Mips/pr33682.ll
    llvm/test/CodeGen/Mips/pr33978.ll
    llvm/test/CodeGen/Mips/pr34975.ll
    llvm/test/CodeGen/Mips/pr35071.ll
    llvm/test/CodeGen/Mips/pr42736.ll
    llvm/test/CodeGen/Mips/prevent-hoisting.ll
    llvm/test/CodeGen/Mips/private-addr.ll
    llvm/test/CodeGen/Mips/private.ll
    llvm/test/CodeGen/Mips/ra-allocatable.ll
    llvm/test/CodeGen/Mips/rdhwr-directives.ll
    llvm/test/CodeGen/Mips/reloc-jalr.ll
    llvm/test/CodeGen/Mips/rem.ll
    llvm/test/CodeGen/Mips/remu.ll
    llvm/test/CodeGen/Mips/return_address.ll
    llvm/test/CodeGen/Mips/return_address_err.ll
    llvm/test/CodeGen/Mips/s2rem.ll
    llvm/test/CodeGen/Mips/sb1.ll
    llvm/test/CodeGen/Mips/sel1c.ll
    llvm/test/CodeGen/Mips/sel2c.ll
    llvm/test/CodeGen/Mips/selTBteqzCmpi.ll
    llvm/test/CodeGen/Mips/selTBtnezCmpi.ll
    llvm/test/CodeGen/Mips/selTBtnezSlti.ll
    llvm/test/CodeGen/Mips/select.ll
    llvm/test/CodeGen/Mips/selectcc.ll
    llvm/test/CodeGen/Mips/selectiondag-optlevel.ll
    llvm/test/CodeGen/Mips/seleq.ll
    llvm/test/CodeGen/Mips/seleqk.ll
    llvm/test/CodeGen/Mips/selgek.ll
    llvm/test/CodeGen/Mips/selgt.ll
    llvm/test/CodeGen/Mips/selle.ll
    llvm/test/CodeGen/Mips/selltk.ll
    llvm/test/CodeGen/Mips/selne.ll
    llvm/test/CodeGen/Mips/selnek.ll
    llvm/test/CodeGen/Mips/selpat.ll
    llvm/test/CodeGen/Mips/setcc-se.ll
    llvm/test/CodeGen/Mips/seteq.ll
    llvm/test/CodeGen/Mips/seteqz.ll
    llvm/test/CodeGen/Mips/setge.ll
    llvm/test/CodeGen/Mips/setgek.ll
    llvm/test/CodeGen/Mips/setle.ll
    llvm/test/CodeGen/Mips/setlt.ll
    llvm/test/CodeGen/Mips/setltk.ll
    llvm/test/CodeGen/Mips/setne.ll
    llvm/test/CodeGen/Mips/setuge.ll
    llvm/test/CodeGen/Mips/setugt.ll
    llvm/test/CodeGen/Mips/setule.ll
    llvm/test/CodeGen/Mips/setult.ll
    llvm/test/CodeGen/Mips/setultk.ll
    llvm/test/CodeGen/Mips/sh1.ll
    llvm/test/CodeGen/Mips/simplebr.ll
    llvm/test/CodeGen/Mips/sint-fp-store_pattern.ll
    llvm/test/CodeGen/Mips/sitofp-selectcc-opt.ll
    llvm/test/CodeGen/Mips/sll1.ll
    llvm/test/CodeGen/Mips/sll2.ll
    llvm/test/CodeGen/Mips/small-section-reserve-gp.ll
    llvm/test/CodeGen/Mips/spill-copy-acreg.ll
    llvm/test/CodeGen/Mips/sr1.ll
    llvm/test/CodeGen/Mips/sra1.ll
    llvm/test/CodeGen/Mips/sra2.ll
    llvm/test/CodeGen/Mips/srl1.ll
    llvm/test/CodeGen/Mips/srl2.ll
    llvm/test/CodeGen/Mips/stackcoloring.ll
    llvm/test/CodeGen/Mips/stchar.ll
    llvm/test/CodeGen/Mips/stldst.ll
    llvm/test/CodeGen/Mips/sub1.ll
    llvm/test/CodeGen/Mips/sub2.ll
    llvm/test/CodeGen/Mips/swzero.ll
    llvm/test/CodeGen/Mips/tail16.ll
    llvm/test/CodeGen/Mips/tailcall/tailcall.ll
    llvm/test/CodeGen/Mips/tglobaladdr-wrapper.ll
    llvm/test/CodeGen/Mips/thread-pointer.ll
    llvm/test/CodeGen/Mips/tls-alias.ll
    llvm/test/CodeGen/Mips/tls-models.ll
    llvm/test/CodeGen/Mips/tls-static.ll
    llvm/test/CodeGen/Mips/tls.ll
    llvm/test/CodeGen/Mips/tls16.ll
    llvm/test/CodeGen/Mips/tls16_2.ll
    llvm/test/CodeGen/Mips/uitofp.ll
    llvm/test/CodeGen/Mips/ul1.ll
    llvm/test/CodeGen/Mips/unaligned-memops.ll
    llvm/test/CodeGen/Mips/unalignedload.ll
    llvm/test/CodeGen/Mips/unsized-global.ll
    llvm/test/CodeGen/Mips/v2i16tof32.ll
    llvm/test/CodeGen/Mips/vector-load-store.ll
    llvm/test/CodeGen/Mips/vector-setcc.ll
    llvm/test/CodeGen/Mips/weak.ll
    llvm/test/CodeGen/Mips/whitespace.ll
    llvm/test/CodeGen/Mips/xor1.ll
    llvm/test/CodeGen/Mips/zeroreg.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/Mips/2008-07-03-SRet.ll b/llvm/test/CodeGen/Mips/2008-07-03-SRet.ll
index f586b5823b9d6..7654ef271fcc9 100644
--- a/llvm/test/CodeGen/Mips/2008-07-03-SRet.ll
+++ b/llvm/test/CodeGen/Mips/2008-07-03-SRet.ll
@@ -2,17 +2,17 @@
 
 %struct.sret0 = type { i32, i32, i32 }
 
-define void @test0(%struct.sret0* noalias sret(%struct.sret0) %agg.result, i32 %dummy) nounwind {
+define void @test0(ptr noalias sret(%struct.sret0) %agg.result, i32 %dummy) nounwind {
 entry:
 ; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4)
 ; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4)
 ; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4)
-  getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 0    ; <i32*>:0 [#uses=1]
-  store i32 %dummy, i32* %0, align 4
-  getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 1    ; <i32*>:1 [#uses=1]
-  store i32 %dummy, i32* %1, align 4
-  getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 2    ; <i32*>:2 [#uses=1]
-  store i32 %dummy, i32* %2, align 4
+  getelementptr %struct.sret0, ptr %agg.result, i32 0, i32 0    ; <ptr>:0 [#uses=1]
+  store i32 %dummy, ptr %0, align 4
+  getelementptr %struct.sret0, ptr %agg.result, i32 0, i32 1    ; <ptr>:1 [#uses=1]
+  store i32 %dummy, ptr %1, align 4
+  getelementptr %struct.sret0, ptr %agg.result, i32 0, i32 2    ; <ptr>:2 [#uses=1]
+  store i32 %dummy, ptr %2, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/2008-07-15-InternalConstant.ll b/llvm/test/CodeGen/Mips/2008-07-15-InternalConstant.ll
index b3d22880925ec..56f1569003a32 100644
--- a/llvm/test/CodeGen/Mips/2008-07-15-InternalConstant.ll
+++ b/llvm/test/CodeGen/Mips/2008-07-15-InternalConstant.ll
@@ -3,20 +3,20 @@
 @.str = internal unnamed_addr constant [10 x i8] c"AAAAAAAAA\00"
 @i0 = internal unnamed_addr constant [5 x i32] [ i32 0, i32 1, i32 2, i32 3, i32 4 ]
 
-define i8* @foo() nounwind {
+define ptr @foo() nounwind {
 entry:
 ; CHECK: foo
 ; CHECK: %hi(.str)
 ; CHECK: %lo(.str)
-	ret i8* getelementptr ([10 x i8], [10 x i8]* @.str, i32 0, i32 0)
+	ret ptr @.str
 }
 
-define i32* @bar() nounwind  {
+define ptr @bar() nounwind  {
 entry:
 ; CHECK: bar
 ; CHECK: %hi(i0)
 ; CHECK: %lo(i0)
-  ret i32* getelementptr ([5 x i32], [5 x i32]* @i0, i32 0, i32 0)
+  ret ptr @i0
 }
 
 ; CHECK: rodata.str1.4,"aMS",@progbits

diff --git a/llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll b/llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll
index a1bc3122e208b..f5a761cb644be 100644
--- a/llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll
+++ b/llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll
@@ -47,15 +47,15 @@
 ; COMMON-NEXT:  .section  .sbss,"aw", at nobits
 @bar = global %struct.anon zeroinitializer
 
-define i8* @A0() nounwind {
+define ptr @A0() nounwind {
 entry:
-	ret i8* getelementptr ([8 x i8], [8 x i8]* @s0, i32 0, i32 0)
+	ret ptr @s0
 }
 
 define i32 @A1() nounwind {
 entry:
-  load i32, i32* getelementptr (%struct.anon, %struct.anon* @foo, i32 0, i32 0), align 8 
-  load i32, i32* getelementptr (%struct.anon, %struct.anon* @foo, i32 0, i32 1), align 4 
+  load i32, ptr @foo, align 8 
+  load i32, ptr getelementptr (%struct.anon, ptr @foo, i32 0, i32 1), align 4 
   add i32 %1, %0
   ret i32 %2
 }

diff --git a/llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll b/llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll
index 5edba029502a1..74a3f218379b5 100644
--- a/llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll
+++ b/llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll
@@ -26,28 +26,28 @@ entry:
 define void @foo0() nounwind {
 entry:
 ; CHECK: addu
-  %0 = load i32, i32* @gi1, align 4
-  %1 = load i32, i32* @gi0, align 4
+  %0 = load i32, ptr @gi1, align 4
+  %1 = load i32, ptr @gi0, align 4
   %2 = tail call i32 asm "addu $0, $1, $2", "=r,r,r"(i32 %0, i32 %1) nounwind
-  store i32 %2, i32* @gi2, align 4
+  store i32 %2, ptr @gi2, align 4
   ret void
 }
 
 define void @foo2() nounwind {
 entry:
 ; CHECK: neg.s
-  %0 = load float, float* @gf1, align 4
+  %0 = load float, ptr @gf1, align 4
   %1 = tail call float asm "neg.s $0, $1", "=f,f"(float %0) nounwind
-  store float %1, float* @gf0, align 4
+  store float %1, ptr @gf0, align 4
   ret void
 }
 
 define void @foo3() nounwind {
 entry:
 ; CHECK: neg.d
-  %0 = load double, double* @gd1, align 8
+  %0 = load double, ptr @gd1, align 8
   %1 = tail call double asm "neg.d $0, $1", "=f,f"(double %0) nounwind
-  store double %1, double* @gd0, align 8
+  store double %1, ptr @gd0, align 8
   ret void
 }
 
@@ -63,9 +63,9 @@ entry:
 define void @foo4() {
 entry:
   %0 = tail call i32 asm sideeffect "ulh $0,16($$sp)\0A\09", "=r,~{$2}"()
-  store i32 %0, i32* @gi2, align 4
-  %1 = load float, float* @gf0, align 4
+  store i32 %0, ptr @gi2, align 4
+  %1 = load float, ptr @gf0, align 4
   %2 = tail call double asm sideeffect "cvt.d.s $0, $1\0A\09", "=f,f,~{$f0}"(float %1)
-  store double %2, double* @gd0, align 8
+  store double %2, ptr @gd0, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll b/llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll
index 592e574a3622b..7c19c15ca7bb4 100644
--- a/llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll
+++ b/llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll
@@ -4,15 +4,15 @@
 
 define double @main(...) {
 entry:
-        %retval = alloca double         ; <double*> [#uses=3]
-        store double 0.000000e+00, double* %retval
-        %r = alloca double              ; <double*> [#uses=1]
-        load double, double* %r         ; <double>:0 [#uses=1]
-        store double %0, double* %retval
+        %retval = alloca double         ; <ptr> [#uses=3]
+        store double 0.000000e+00, ptr %retval
+        %r = alloca double              ; <ptr> [#uses=1]
+        load double, ptr %r         ; <double>:0 [#uses=1]
+        store double %0, ptr %retval
         br label %return
 
 return:         ; preds = %entry
-        load double, double* %retval            ; <double>:1 [#uses=1]
+        load double, ptr %retval            ; <double>:1 [#uses=1]
         ret double %1
 }
 

diff --git a/llvm/test/CodeGen/Mips/2008-08-06-Alloca.ll b/llvm/test/CodeGen/Mips/2008-08-06-Alloca.ll
index 0d94b19e46291..84d92e3189ea1 100644
--- a/llvm/test/CodeGen/Mips/2008-08-06-Alloca.ll
+++ b/llvm/test/CodeGen/Mips/2008-08-06-Alloca.ll
@@ -4,12 +4,12 @@ define i32 @twoalloca(i32 %size) nounwind {
 entry:
 ; CHECK: subu ${{[0-9]+}}, $sp
 ; CHECK: subu ${{[0-9]+}}, $sp
-  alloca i8, i32 %size    ; <i8*>:0 [#uses=1]
-  alloca i8, i32 %size    ; <i8*>:1 [#uses=1]
-  call i32 @foo( i8* %0 ) nounwind    ; <i32>:2 [#uses=1]
-  call i32 @foo( i8* %1 ) nounwind    ; <i32>:3 [#uses=1]
+  alloca i8, i32 %size    ; <ptr>:0 [#uses=1]
+  alloca i8, i32 %size    ; <ptr>:1 [#uses=1]
+  call i32 @foo( ptr %0 ) nounwind    ; <i32>:2 [#uses=1]
+  call i32 @foo( ptr %1 ) nounwind    ; <i32>:3 [#uses=1]
   add i32 %3, %2    ; <i32>:4 [#uses=1]
   ret i32 %4
 }
 
-declare i32 @foo(i8*)
+declare i32 @foo(ptr)

diff --git a/llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll b/llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll
index eaf6ddc911e3c..d2e054b17973d 100644
--- a/llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll
+++ b/llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll
@@ -1,14 +1,14 @@
 ; RUN: llc < %s -march=mips
 ; PR2794
 
-define i32 @main(i8*) nounwind {
+define i32 @main(ptr) nounwind {
 entry:
         br label %continue.outer
 
 continue.outer:         ; preds = %case4, %entry
         %p.0.ph.rec = phi i32 [ 0, %entry ], [ %indvar.next, %case4 ]          ; <i32> [#uses=2]
-        %p.0.ph = getelementptr i8, i8* %0, i32 %p.0.ph.rec         ; <i8*> [#uses=1]
-        %1 = load i8, i8* %p.0.ph           ; <i8> [#uses=1]
+        %p.0.ph = getelementptr i8, ptr %0, i32 %p.0.ph.rec         ; <ptr> [#uses=1]
+        %1 = load i8, ptr %p.0.ph           ; <i8> [#uses=1]
         switch i8 %1, label %infloop [
                 i8 0, label %return.split
                 i8 76, label %case4

diff --git a/llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll b/llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll
index 789f7ee3d4b42..d80da658d9283 100644
--- a/llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll
+++ b/llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll
@@ -1,52 +1,52 @@
 ; RUN: llc -march=mips -mattr=+soft-float < %s
 ; PR2667
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-	%struct._Bigint = type { %struct._Bigint*, i32, i32, i32, i32, [1 x i32] }
-	%struct.__FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*, i8*, i32)*, i32 (i8*, i8*, i32)*, i32 (i8*, i32, i32)*, i32 (i8*)*, %struct.__sbuf, i8*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i32, %struct._reent*, i32 }
-	%struct.__sbuf = type { i8*, i32 }
-	%struct._atexit = type { %struct._atexit*, i32, [32 x void ()*], %struct._on_exit_args }
-	%struct._glue = type { %struct._glue*, i32, %struct.__FILE* }
-	%struct._on_exit_args = type { [32 x i8*], [32 x i8*], i32, i32 }
-	%struct._reent = type { i32, %struct.__FILE*, %struct.__FILE*, %struct.__FILE*, i32, [25 x i8], i32, i8*, i32, void (%struct._reent*)*, %struct._Bigint*, i32, %struct._Bigint*, %struct._Bigint**, i32, i8*, { { [30 x i8*], [30 x i32] } }, %struct._atexit*, %struct._atexit, void (i32)**, %struct._glue, [3 x %struct.__FILE] }
-@_impure_ptr = external global %struct._reent*		; <%struct._reent**> [#uses=1]
+	%struct._Bigint = type { ptr, i32, i32, i32, i32, [1 x i32] }
+	%struct.__FILE = type { ptr, i32, i32, i16, i16, %struct.__sbuf, i32, ptr, ptr, ptr, ptr, ptr, %struct.__sbuf, ptr, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i32, ptr, i32 }
+	%struct.__sbuf = type { ptr, i32 }
+	%struct._atexit = type { ptr, i32, [32 x ptr], %struct._on_exit_args }
+	%struct._glue = type { ptr, i32, ptr }
+	%struct._on_exit_args = type { [32 x ptr], [32 x ptr], i32, i32 }
+	%struct._reent = type { i32, ptr, ptr, ptr, i32, [25 x i8], i32, ptr, i32, ptr, ptr, i32, ptr, ptr, i32, ptr, { { [30 x ptr], [30 x i32] } }, ptr, %struct._atexit, ptr, %struct._glue, [3 x %struct.__FILE] }
+@_impure_ptr = external global ptr		; <ptr> [#uses=1]
 
-define double @_erand48_r(%struct._reent* %r, i16* %xseed) nounwind {
+define double @_erand48_r(ptr %r, ptr %xseed) nounwind {
 entry:
-	tail call void @__dorand48( %struct._reent* %r, i16* %xseed ) nounwind
-	load i16, i16* %xseed, align 2		; <i16>:0 [#uses=1]
+	tail call void @__dorand48( ptr %r, ptr %xseed ) nounwind
+	load i16, ptr %xseed, align 2		; <i16>:0 [#uses=1]
 	uitofp i16 %0 to double		; <double>:1 [#uses=1]
 	tail call double @ldexp( double %1, i32 -48 ) nounwind		; <double>:2 [#uses=1]
-	getelementptr i16, i16* %xseed, i32 1		; <i16*>:3 [#uses=1]
-	load i16, i16* %3, align 2		; <i16>:4 [#uses=1]
+	getelementptr i16, ptr %xseed, i32 1		; <ptr>:3 [#uses=1]
+	load i16, ptr %3, align 2		; <i16>:4 [#uses=1]
 	uitofp i16 %4 to double		; <double>:5 [#uses=1]
 	tail call double @ldexp( double %5, i32 -32 ) nounwind		; <double>:6 [#uses=1]
 	fadd double %2, %6		; <double>:7 [#uses=1]
-	getelementptr i16, i16* %xseed, i32 2		; <i16*>:8 [#uses=1]
-	load i16, i16* %8, align 2		; <i16>:9 [#uses=1]
+	getelementptr i16, ptr %xseed, i32 2		; <ptr>:8 [#uses=1]
+	load i16, ptr %8, align 2		; <i16>:9 [#uses=1]
 	uitofp i16 %9 to double		; <double>:10 [#uses=1]
 	tail call double @ldexp( double %10, i32 -16 ) nounwind		; <double>:11 [#uses=1]
 	fadd double %7, %11		; <double>:12 [#uses=1]
 	ret double %12
 }
 
-declare void @__dorand48(%struct._reent*, i16*)
+declare void @__dorand48(ptr, ptr)
 
 declare double @ldexp(double, i32)
 
-define double @erand48(i16* %xseed) nounwind {
+define double @erand48(ptr %xseed) nounwind {
 entry:
-	load %struct._reent*, %struct._reent** @_impure_ptr, align 4		; <%struct._reent*>:0 [#uses=1]
-	tail call void @__dorand48( %struct._reent* %0, i16* %xseed ) nounwind
-	load i16, i16* %xseed, align 2		; <i16>:1 [#uses=1]
+	load ptr, ptr @_impure_ptr, align 4		; <ptr>:0 [#uses=1]
+	tail call void @__dorand48( ptr %0, ptr %xseed ) nounwind
+	load i16, ptr %xseed, align 2		; <i16>:1 [#uses=1]
 	uitofp i16 %1 to double		; <double>:2 [#uses=1]
 	tail call double @ldexp( double %2, i32 -48 ) nounwind		; <double>:3 [#uses=1]
-	getelementptr i16, i16* %xseed, i32 1		; <i16*>:4 [#uses=1]
-	load i16, i16* %4, align 2		; <i16>:5 [#uses=1]
+	getelementptr i16, ptr %xseed, i32 1		; <ptr>:4 [#uses=1]
+	load i16, ptr %4, align 2		; <i16>:5 [#uses=1]
 	uitofp i16 %5 to double		; <double>:6 [#uses=1]
 	tail call double @ldexp( double %6, i32 -32 ) nounwind		; <double>:7 [#uses=1]
 	fadd double %3, %7		; <double>:8 [#uses=1]
-	getelementptr i16, i16* %xseed, i32 2		; <i16*>:9 [#uses=1]
-	load i16, i16* %9, align 2		; <i16>:10 [#uses=1]
+	getelementptr i16, ptr %xseed, i32 2		; <ptr>:9 [#uses=1]
+	load i16, ptr %9, align 2		; <i16>:10 [#uses=1]
 	uitofp i16 %10 to double		; <double>:11 [#uses=1]
 	tail call double @ldexp( double %11, i32 -16 ) nounwind		; <double>:12 [#uses=1]
 	fadd double %8, %12		; <double>:13 [#uses=1]

diff --git a/llvm/test/CodeGen/Mips/2010-07-20-Switch.ll b/llvm/test/CodeGen/Mips/2010-07-20-Switch.ll
index 15bcbd99a2739..08e45155e8af1 100644
--- a/llvm/test/CodeGen/Mips/2010-07-20-Switch.ll
+++ b/llvm/test/CodeGen/Mips/2010-07-20-Switch.ll
@@ -13,9 +13,9 @@
 
 define i32 @main() nounwind readnone {
 entry:
-  %x = alloca i32, align 4                        ; <i32*> [#uses=2]
-  store volatile i32 2, i32* %x, align 4
-  %0 = load volatile i32, i32* %x, align 4             ; <i32> [#uses=1]
+  %x = alloca i32, align 4                        ; <ptr> [#uses=2]
+  store volatile i32 2, ptr %x, align 4
+  %0 = load volatile i32, ptr %x, align 4             ; <i32> [#uses=1]
 ; STATIC-O32: sll $[[R0:[0-9]+]], ${{[0-9]+}}, 2
 ; STATIC-O32: lui $[[R1:[0-9]+]], %hi($JTI0_0)
 ; STATIC-O32: addu $[[R2:[0-9]+]], $[[R0]], $[[R1]]

diff --git a/llvm/test/CodeGen/Mips/2011-05-26-BranchKillsVreg.ll b/llvm/test/CodeGen/Mips/2011-05-26-BranchKillsVreg.ll
index 1255949740f76..281de4bed3120 100644
--- a/llvm/test/CodeGen/Mips/2011-05-26-BranchKillsVreg.ll
+++ b/llvm/test/CodeGen/Mips/2011-05-26-BranchKillsVreg.ll
@@ -9,7 +9,7 @@
 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-n32"
 target triple = "mips-ellcc-linux"
 
-define i32 @mergesort(i8* %base, i32 %nmemb, i32 %size, i32 (i8*, i8*)* nocapture %cmp) nounwind {
+define i32 @mergesort(ptr %base, i32 %nmemb, i32 %size, ptr nocapture %cmp) nounwind {
 entry:
   br i1 undef, label %return, label %if.end13
 
@@ -17,19 +17,19 @@ if.end13:                                         ; preds = %entry
   br label %while.body
 
 while.body:                                       ; preds = %while.body, %if.end13
-  %list1.0482 = phi i8* [ %base, %if.end13 ], [ null, %while.body ]
+  %list1.0482 = phi ptr [ %base, %if.end13 ], [ null, %while.body ]
   br i1 undef, label %while.end415, label %while.body
 
 while.end415:                                     ; preds = %while.body
   br i1 undef, label %if.then419, label %if.end427
 
 if.then419:                                       ; preds = %while.end415
-  %call425 = tail call i8* @memmove(i8* %list1.0482, i8* undef, i32 undef) nounwind
+  %call425 = tail call ptr @memmove(ptr %list1.0482, ptr undef, i32 undef) nounwind
   br label %if.end427
 
 if.end427:                                        ; preds = %if.then419, %while.end415
-  %list2.1 = phi i8* [ undef, %if.then419 ], [ %list1.0482, %while.end415 ]
-  tail call void @free(i8* %list2.1)
+  %list2.1 = phi ptr [ undef, %if.then419 ], [ %list1.0482, %while.end415 ]
+  tail call void @free(ptr %list2.1)
   unreachable
 
 return:                                           ; preds = %entry
@@ -37,7 +37,7 @@ return:                                           ; preds = %entry
 }
 
 
-declare i8* @memmove(i8*, i8*, i32)
+declare ptr @memmove(ptr, ptr, i32)
 
-declare void @free(i8*)
+declare void @free(ptr)
 

diff --git a/llvm/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll b/llvm/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll
index 2964b19c1d33a..745d57c8e9a2d 100644
--- a/llvm/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll
+++ b/llvm/test/CodeGen/Mips/2012-12-12-ExpandMemcpy.ll
@@ -2,10 +2,10 @@
 
 @.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1
 
-define void @t(i8* %ptr) {
+define void @t(ptr %ptr) {
 entry:
-  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %ptr, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i64 0, i64 0), i64 7, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i64(ptr %ptr, ptr @.str, i64 7, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/br1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/br1.ll
index ceafb32293974..2f0f1a04a5588 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/br1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/br1.ll
@@ -10,12 +10,12 @@
 ; Function Attrs: nounwind
 define void @br() #0 {
 entry:
-  %0 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @b, align 4
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  store i32 6754, i32* @i, align 4
+  store i32 6754, ptr @i, align 4
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/bswap1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/bswap1.ll
index 7bfc30c716f7b..bd762a0e1d741 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/bswap1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/bswap1.ll
@@ -26,9 +26,9 @@ define void @b16() {
 
   ; 32R2:           wsbh  $[[RESULT:[0-9]+]], $[[A_VAL]]
 
-  %1 = load i16, i16* @a, align 2
+  %1 = load i16, ptr @a, align 2
   %2 = call i16 @llvm.bswap.i16(i16 %1)
-  store i16 %2, i16* @a1, align 2
+  store i16 %2, ptr @a1, align 2
   ret void
 }
 
@@ -51,8 +51,8 @@ define void @b32() {
   ; 32R2:           wsbh  $[[TMP:[0-9]+]], $[[B_VAL]]
   ; 32R2:           rotr  $[[RESULT:[0-9]+]], $[[TMP]], 16
 
-  %1 = load i32, i32* @b, align 4
+  %1 = load i32, ptr @b, align 4
   %2 = call i32 @llvm.bswap.i32(i32 %1)
-  store i32 %2, i32* @b1, align 4
+  store i32 %2, ptr @b1, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll b/llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll
index 0698e88ddb7a5..1fd1ea42a0326 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll
@@ -260,13 +260,13 @@ define void @cxiiiiconv() {
   ; ALL-DAG:        lhu     $[[REG_US1:[0-9]+]], 0($[[REG_US1_ADDR]])
   ; ALL-DAG:        andi    $7, $[[REG_US1]], 65535
   ; ALL:            jalr    $25
-  %1 = load i8, i8* @c1, align 1
+  %1 = load i8, ptr @c1, align 1
   %conv = sext i8 %1 to i32
-  %2 = load i8, i8* @uc1, align 1
+  %2 = load i8, ptr @uc1, align 1
   %conv1 = zext i8 %2 to i32
-  %3 = load i16, i16* @s1, align 2
+  %3 = load i16, ptr @s1, align 2
   %conv2 = sext i16 %3 to i32
-  %4 = load i16, i16* @us1, align 2
+  %4 = load i16, ptr @us1, align 2
   %conv3 = zext i16 %4 to i32
   call void @xiiii(i32 %conv, i32 %conv1, i32 %conv2, i32 %conv3)
   ret void

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/constexpr-address.ll b/llvm/test/CodeGen/Mips/Fast-ISel/constexpr-address.ll
index f27791a9241e4..f89802d1a11c0 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/constexpr-address.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/constexpr-address.ll
@@ -13,6 +13,6 @@ define void @foo() {
 ; CHECK:        sw      $[[T0]], 8($[[ARR]])
 
 entry:
-  store i32 12345, i32* getelementptr inbounds ([10 x i32], [10 x i32]* @ARR, i32 0, i32 2), align 4
+  store i32 12345, ptr getelementptr inbounds ([10 x i32], ptr @ARR, i32 0, i32 2), align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/div1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/div1.ll
index f565af258f265..effa047383882 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/div1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/div1.ll
@@ -25,10 +25,10 @@ define void @divs() {
   ; CHECK-DAG:    teq     $[[K]], $zero, 7
   ; CHECK-DAG:    mflo    $[[RESULT:[0-9]+]]
   ; CHECK:        sw      $[[RESULT]], 0($[[I_ADDR]])
-  %1 = load i32, i32* @sj, align 4
-  %2 = load i32, i32* @sk, align 4
+  %1 = load i32, ptr @sj, align 4
+  %2 = load i32, ptr @sk, align 4
   %div = sdiv i32 %1, %2
-  store i32 %div, i32* @si, align 4
+  store i32 %div, ptr @si, align 4
   ret void
 }
 
@@ -47,9 +47,9 @@ define void @divu() {
   ; CHECK-DAG:        teq     $[[K]], $zero, 7
   ; CHECK-DAG:        mflo    $[[RESULT:[0-9]+]]
   ; CHECK:            sw      $[[RESULT]], 0($[[I_ADDR]])
-  %1 = load i32, i32* @uj, align 4
-  %2 = load i32, i32* @uk, align 4
+  %1 = load i32, ptr @uj, align 4
+  %2 = load i32, ptr @uk, align 4
   %div = udiv i32 %1, %2
-  store i32 %div, i32* @ui, align 4
+  store i32 %div, ptr @ui, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/double-arg.ll b/llvm/test/CodeGen/Mips/Fast-ISel/double-arg.ll
index 84a284a5bf217..16cba99ad0657 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/double-arg.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/double-arg.ll
@@ -9,6 +9,6 @@ entry:
 ; CHECK-LABEL: f:
 ; CHECK: sdc1
   %value.addr = alloca double, align 8
-  store double %value, double* %value.addr, align 8
+  store double %value, ptr %value.addr, align 8
   ret i1 false
 }

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fast-isel-softfloat-lower-args.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fast-isel-softfloat-lower-args.ll
index 18bc397480e3c..b69000fc625ea 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fast-isel-softfloat-lower-args.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fast-isel-softfloat-lower-args.ll
@@ -6,6 +6,6 @@
 define void @__signbit(double %__x) {
 entry:
   %__x.addr = alloca double, align 8
-  store double %__x, double* %__x.addr, align 8
+  store double %__x, ptr %__x.addr, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
index a7a537e68a7eb..d7913557c7d42 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fastalloca.ll
@@ -11,17 +11,15 @@ entry:
   %retval = alloca i32, align 4
   %x.addr = alloca i32, align 4
   %a = alloca %struct.x, align 4
-  %c = alloca %struct.x*, align 4
-  store i32 %x, i32* %x.addr, align 4
-  %x1 = getelementptr inbounds %struct.x, %struct.x* %a, i32 0, i32 0
-  %0 = load i32, i32* %x.addr, align 4
-  store i32 %0, i32* %x1, align 4
-  store %struct.x* %a, %struct.x** %c, align 4
-  %1 = load %struct.x*, %struct.x** %c, align 4
-  %x2 = getelementptr inbounds %struct.x, %struct.x* %1, i32 0, i32 0
-  %2 = load i32, i32* %x2, align 4
-  store i32 %2, i32* @i, align 4
-  %3 = load i32, i32* %retval
+  %c = alloca ptr, align 4
+  store i32 %x, ptr %x.addr, align 4
+  %0 = load i32, ptr %x.addr, align 4
+  store i32 %0, ptr %a, align 4
+  store ptr %a, ptr %c, align 4
+  %1 = load ptr, ptr %c, align 4
+  %2 = load i32, ptr %1, align 4
+  store i32 %2, ptr @i, align 4
+  %3 = load i32, ptr %retval
 ; CHECK:        addiu   $[[A_ADDR:[0-9]+]], $sp, 8
 ; CHECK-DAG:    lw      $[[I_ADDR:[0-9]+]], %got(i)($[[REG_GP:[0-9]+]])
 ; CHECK-DAG:    sw      $[[A_ADDR]], [[A_ADDR_FI:[0-9]+]]($sp)

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll
index 67b49c6a8ea27..95270ff6b272d 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll
@@ -12,8 +12,8 @@
 ; Function Attrs: nounwind
 define void @feq1()  {
 entry:
-  %0 = load float, float* @f1, align 4
-  %1 = load float, float* @f2, align 4
+  %0 = load float, ptr @f1, align 4
+  %1 = load float, ptr @f2, align 4
   %cmp = fcmp oeq float %0, %1
 ; CHECK-LABEL:  feq1:
 ; CHECK:        lw      $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
@@ -26,15 +26,15 @@ entry:
 ; CHECK:        movt  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
 
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @fne1()  {
 entry:
-  %0 = load float, float* @f1, align 4
-  %1 = load float, float* @f2, align 4
+  %0 = load float, ptr @f1, align 4
+  %1 = load float, ptr @f2, align 4
   %cmp = fcmp une float %0, %1
 ; CHECK-LABEL:  fne1:
 ; CHECK:        lw      $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
@@ -46,15 +46,15 @@ entry:
 ; CHECK:        c.eq.s  $f[[REG_F1]], $f[[REG_F2]]
 ; CHECK:        movf  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @flt1()  {
 entry:
-  %0 = load float, float* @f1, align 4
-  %1 = load float, float* @f2, align 4
+  %0 = load float, ptr @f1, align 4
+  %1 = load float, ptr @f2, align 4
   %cmp = fcmp olt float %0, %1
 ; CHECK-LABEL:  flt1:
 ; CHECK:        lw      $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
@@ -67,15 +67,15 @@ entry:
 ; CHECK:        movt  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
 
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @fgt1()  {
 entry:
-  %0 = load float, float* @f1, align 4
-  %1 = load float, float* @f2, align 4
+  %0 = load float, ptr @f1, align 4
+  %1 = load float, ptr @f2, align 4
   %cmp = fcmp ogt float %0, %1
 ; CHECK-LABEL: fgt1:
 ; CHECK:        lw      $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
@@ -87,15 +87,15 @@ entry:
 ; CHECK:        c.ule.s  $f[[REG_F1]], $f[[REG_F2]]
 ; CHECK:        movf  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @fle1()  {
 entry:
-  %0 = load float, float* @f1, align 4
-  %1 = load float, float* @f2, align 4
+  %0 = load float, ptr @f1, align 4
+  %1 = load float, ptr @f2, align 4
   %cmp = fcmp ole float %0, %1
 ; CHECK-LABEL:  fle1:
 ; CHECK:        lw      $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
@@ -107,15 +107,15 @@ entry:
 ; CHECK:        c.ole.s  $f[[REG_F1]], $f[[REG_F2]]
 ; CHECK:        movt  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @fge1()  {
 entry:
-  %0 = load float, float* @f1, align 4
-  %1 = load float, float* @f2, align 4
+  %0 = load float, ptr @f1, align 4
+  %1 = load float, ptr @f2, align 4
   %cmp = fcmp oge float %0, %1
 ; CHECK-LABEL:  fge1:
 ; CHECK:        lw      $[[REG_F1_GOT:[0-9]+]], %got(f1)(${{[0-9]+}})
@@ -127,15 +127,15 @@ entry:
 ; CHECK:        c.ult.s  $f[[REG_F1]], $f[[REG_F2]]
 ; CHECK:        movf  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @deq1()  {
 entry:
-  %0 = load double, double* @d1, align 8
-  %1 = load double, double* @d2, align 8
+  %0 = load double, ptr @d1, align 8
+  %1 = load double, ptr @d2, align 8
   %cmp = fcmp oeq double %0, %1
 ; CHECK-LABEL:  deq1:
 ; CHECK:        lw      $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
@@ -147,15 +147,15 @@ entry:
 ; CHECK:        c.eq.d  $f[[REG_D1]], $f[[REG_D2]]
 ; CHECK:        movt  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @dne1()  {
 entry:
-  %0 = load double, double* @d1, align 8
-  %1 = load double, double* @d2, align 8
+  %0 = load double, ptr @d1, align 8
+  %1 = load double, ptr @d2, align 8
   %cmp = fcmp une double %0, %1
 ; CHECK-LABEL:  dne1:
 ; CHECK:        lw      $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
@@ -167,15 +167,15 @@ entry:
 ; CHECK:        c.eq.d  $f[[REG_D1]], $f[[REG_D2]]
 ; CHECK:        movf  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @dlt1()  {
 entry:
-  %0 = load double, double* @d1, align 8
-  %1 = load double, double* @d2, align 8
+  %0 = load double, ptr @d1, align 8
+  %1 = load double, ptr @d2, align 8
   %cmp = fcmp olt double %0, %1
 ; CHECK-LABEL:  dlt1:
 ; CHECK:        lw      $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
@@ -187,15 +187,15 @@ entry:
 ; CHECK:        c.olt.d  $f[[REG_D1]], $f[[REG_D2]]
 ; CHECK:        movt  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @dgt1()  {
 entry:
-  %0 = load double, double* @d1, align 8
-  %1 = load double, double* @d2, align 8
+  %0 = load double, ptr @d1, align 8
+  %1 = load double, ptr @d2, align 8
   %cmp = fcmp ogt double %0, %1
 ; CHECK-LABEL:  dgt1:
 ; CHECK:        lw      $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
@@ -207,15 +207,15 @@ entry:
 ; CHECK:        c.ule.d  $f[[REG_D1]], $f[[REG_D2]]
 ; CHECK:        movf  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @dle1()  {
 entry:
-  %0 = load double, double* @d1, align 8
-  %1 = load double, double* @d2, align 8
+  %0 = load double, ptr @d1, align 8
+  %1 = load double, ptr @d2, align 8
   %cmp = fcmp ole double %0, %1
 ; CHECK-LABEL:  dle1:
 ; CHECK:        lw      $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
@@ -227,15 +227,15 @@ entry:
 ; CHECK:        c.ole.d  $f[[REG_D1]], $f[[REG_D2]]
 ; CHECK:        movt  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @dge1()  {
 entry:
-  %0 = load double, double* @d1, align 8
-  %1 = load double, double* @d2, align 8
+  %0 = load double, ptr @d1, align 8
+  %1 = load double, ptr @d2, align 8
   %cmp = fcmp oge double %0, %1
 ; CHECK-LABEL:  dge1:
 ; CHECK:        lw      $[[REG_D1_GOT:[0-9]+]], %got(d1)(${{[0-9]+}})
@@ -247,7 +247,7 @@ entry:
 ; CHECK:        c.ult.d  $f[[REG_D1]], $f[[REG_D2]]
 ; CHECK:        movf  $[[REG_ZERO]], $[[REG_ONE]], $fcc0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll
index d9637af926350..fbd5039f20a1a 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll
@@ -10,10 +10,10 @@
 ; Function Attrs: nounwind
 define void @dv() #0 {
 entry:
-  %0 = load float, float* @f, align 4
+  %0 = load float, ptr @f, align 4
   %conv = fpext float %0 to double
 ; CHECK: cvt.d.s  $f{{[0-9]+}}, $f{{[0-9]+}}
-  store double %conv, double* @d_f, align 8
+  store double %conv, ptr @d_f, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll
index 9a9570f21b3aa..67b509938c40a 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll
@@ -14,11 +14,11 @@
 define void @ifv() {
 entry:
 ; CHECK-LABEL:   .ent  ifv
-  %0 = load float, float* @f, align 4
+  %0 = load float, ptr @f, align 4
   %conv = fptosi float %0 to i32
 ; CHECK:   trunc.w.s  $f[[REG:[0-9]+]], $f{{[0-9]+}}
 ; CHECK:   mfc1	${{[0-9]+}}, $f[[REG]]
-  store i32 %conv, i32* @i_f, align 4
+  store i32 %conv, ptr @i_f, align 4
   ret void
 }
 
@@ -26,10 +26,10 @@ entry:
 define void @idv() {
 entry:
 ; CHECK-LABEL:   .ent  idv
-  %0 = load double, double* @d, align 8
+  %0 = load double, ptr @d, align 8
   %conv = fptosi double %0 to i32
 ; CHECK:   trunc.w.d  $f[[REG:[0-9]+]], $f{{[0-9]+}}
 ; CHECK:   mfc1	${{[0-9]+}}, $f[[REG]]
-  store i32 %conv, i32* @i_d, align 4
+  store i32 %conv, ptr @i_d, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll
index 61828737aabcb..0a414316fa1b7 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll
@@ -10,10 +10,10 @@
 ; Function Attrs: nounwind
 define void @fv() #0 {
 entry:
-  %0 = load double, double* @d, align 8
+  %0 = load double, ptr @d, align 8
   %conv = fptrunc double %0 to float
 ; CHECK: cvt.s.d  $f{{[0-9]+}}, $f{{[0-9]+}}
-  store float %conv, float* @f, align 4
+  store float %conv, ptr @f, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll b/llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll
index e9bf30ed7b1d8..53783c11785fb 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll
@@ -14,8 +14,8 @@ define void @eq()  {
 entry:
 ; CHECK-LABEL:  .ent  eq
 
-  %0 = load i32, i32* @c, align 4
-  %1 = load i32, i32* @d, align 4
+  %0 = load i32, ptr @c, align 4
+  %1 = load i32, ptr @d, align 4
   %cmp = icmp eq i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
@@ -27,7 +27,7 @@ entry:
 ; FIXME: This instruction is redundant. The sltiu can only produce 0 and 1.
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG2]], 1
 
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
@@ -35,8 +35,8 @@ entry:
 define void @ne()  {
 entry:
 ; CHECK-LABEL:  .ent  ne
-  %0 = load i32, i32* @c, align 4
-  %1 = load i32, i32* @d, align 4
+  %0 = load i32, ptr @c, align 4
+  %1 = load i32, ptr @d, align 4
   %cmp = icmp ne i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
@@ -48,7 +48,7 @@ entry:
 ; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG2]], 1
 
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
@@ -56,8 +56,8 @@ entry:
 define void @ugt()  {
 entry:
 ; CHECK-LABEL:  .ent  ugt
-  %0 = load i32, i32* @uc, align 4
-  %1 = load i32, i32* @ud, align 4
+  %0 = load i32, ptr @uc, align 4
+  %1 = load i32, ptr @ud, align 4
   %cmp = icmp ugt i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK:  lw	$[[REG_UC_GOT:[0-9+]]], %got(uc)(${{[0-9]+}})
@@ -68,7 +68,7 @@ entry:
 ; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 1
 
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
@@ -76,8 +76,8 @@ entry:
 define void @ult()  {
 entry:
 ; CHECK-LABEL:  .ent  ult
-  %0 = load i32, i32* @uc, align 4
-  %1 = load i32, i32* @ud, align 4
+  %0 = load i32, ptr @uc, align 4
+  %1 = load i32, ptr @ud, align 4
   %cmp = icmp ult i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}})
@@ -87,7 +87,7 @@ entry:
 ; CHECK:  sltu  $[[REG1:[0-9]+]], $[[REG_UC]], $[[REG_UD]]
 ; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 1
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
@@ -95,8 +95,8 @@ entry:
 define void @uge()  {
 entry:
 ; CHECK-LABEL:  .ent  uge
-  %0 = load i32, i32* @uc, align 4
-  %1 = load i32, i32* @ud, align 4
+  %0 = load i32, ptr @uc, align 4
+  %1 = load i32, ptr @ud, align 4
   %cmp = icmp uge i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}})
@@ -107,7 +107,7 @@ entry:
 ; CHECK:  xori  $[[REG2:[0-9]+]], $[[REG1]], 1
 ; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG2]], 1
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
@@ -115,8 +115,8 @@ entry:
 define void @ule()  {
 entry:
 ; CHECK-LABEL:  .ent  ule
-  %0 = load i32, i32* @uc, align 4
-  %1 = load i32, i32* @ud, align 4
+  %0 = load i32, ptr @uc, align 4
+  %1 = load i32, ptr @ud, align 4
   %cmp = icmp ule i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK:  lw	$[[REG_UC_GOT:[0-9+]]], %got(uc)(${{[0-9]+}})
@@ -127,7 +127,7 @@ entry:
 ; CHECK:  xori  $[[REG2:[0-9]+]], $[[REG1]], 1
 ; FIXME: This instruction is redundant. The sltu can only produce 0 and 1.
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG2]], 1
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
@@ -135,8 +135,8 @@ entry:
 define void @sgt()  {
 entry:
 ; CHECK-LABEL:  .ent sgt
-  %0 = load i32, i32* @c, align 4
-  %1 = load i32, i32* @d, align 4
+  %0 = load i32, ptr @c, align 4
+  %1 = load i32, ptr @d, align 4
   %cmp = icmp sgt i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK:  lw	$[[REG_C_GOT:[0-9+]]], %got(c)(${{[0-9]+}})
@@ -146,7 +146,7 @@ entry:
 ; CHECK:  slt  $[[REG1:[0-9]+]], $[[REG_D]], $[[REG_C]]
 ; FIXME: This instruction is redundant. The slt can only produce 0 and 1.
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 1
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
@@ -154,8 +154,8 @@ entry:
 define void @slt()  {
 entry:
 ; CHECK-LABEL:  .ent slt
-  %0 = load i32, i32* @c, align 4
-  %1 = load i32, i32* @d, align 4
+  %0 = load i32, ptr @c, align 4
+  %1 = load i32, ptr @d, align 4
   %cmp = icmp slt i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK-DAG:  lw	$[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
@@ -165,7 +165,7 @@ entry:
 ; CHECK:  slt  $[[REG1:[0-9]+]], $[[REG_C]], $[[REG_D]]
 ; FIXME: This instruction is redundant. The slt can only produce 0 and 1.
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 1
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }
 
@@ -173,11 +173,11 @@ entry:
 define void @sge()  {
 entry:
 ; CHECK-LABEL:  .ent sge
-  %0 = load i32, i32* @c, align 4
-  %1 = load i32, i32* @d, align 4
+  %0 = load i32, ptr @c, align 4
+  %1 = load i32, ptr @d, align 4
   %cmp = icmp sge i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
 ; CHECK-DAG:  lw	$[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
 ; CHECK-DAG:  lw	$[[REG_C_GOT:[0-9+]]], %got(c)(${{[0-9]+}})
 ; CHECK-DAG:  lw	$[[REG_D:[0-9]+]], 0($[[REG_D_GOT]])
@@ -193,8 +193,8 @@ entry:
 define void @sle()  {
 entry:
 ; CHECK-LABEL:  .ent sle
-  %0 = load i32, i32* @c, align 4
-  %1 = load i32, i32* @d, align 4
+  %0 = load i32, ptr @c, align 4
+  %1 = load i32, ptr @d, align 4
   %cmp = icmp sle i32 %0, %1
   %conv = zext i1 %cmp to i32
 ; CHECK:  lw	$[[REG_C_GOT:[0-9+]]], %got(c)(${{[0-9]+}})
@@ -205,6 +205,6 @@ entry:
 ; CHECK:        xori    $[[REG2:[0-9]+]], $[[REG1]], 1
 ; FIXME: This instruction is redundant. The slt can only produce 0 and 1.
 ; CHECK:        andi    ${{[0-9]+}}, $[[REG2]], 1
-  store i32 %conv, i32* @b1, align 4
+  store i32 %conv, ptr @b1, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/icmpbr1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/icmpbr1.ll
index e44ab36532c5b..37e49c2e8a428 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/icmpbr1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/icmpbr1.ll
@@ -3,16 +3,16 @@
 ; RUN:     < %s -verify-machineinstrs | FileCheck %s
 
 
-define i32 @foobar(i32*) {
+define i32 @foobar(ptr) {
 bb0:
 ; CHECK-LABEL: foobar:
 ; CHECK:       # %bb.0: # %bb0
 ; CHECK:        lw $[[REG0:[0-9]+]], 0($4)
 ; CHECK-NEXT:   sltiu $[[REG1:[0-9]+]], $[[REG0]], 1
 ; CHECK:        sw $[[REG1]], [[SPILL:[0-9]+]]($sp) # 4-byte Folded Spill
-  %1 = load  i32, i32* %0 , align 4
+  %1 = load  i32, ptr %0 , align 4
   %2 = icmp eq i32 %1, 0
-  store atomic i32 0, i32* %0 monotonic, align 4
+  store atomic i32 0, ptr %0 monotonic, align 4
   br label %bb1
 bb1:
 ; CHECK:       # %bb.1: # %bb1

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/icmpi1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/icmpi1.ll
index 97d4eb8f7a3fa..66c1f4f30c153 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/icmpi1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/icmpi1.ll
@@ -2,12 +2,12 @@
 ; RUN:     < %s -verify-machineinstrs | FileCheck %s
 
 
-define zeroext i1 @foo(i8* nocapture readonly) {
+define zeroext i1 @foo(ptr nocapture readonly) {
 ; CHECK-LABEL: foo
 ; CHECK:         lbu $[[REG0:[0-9]+]], 0($4)
 ; CHECK-NEXT:    xori $[[REG1:[0-9]+]], $[[REG0]], 1
 ; CHECK-NEXT:    andi $2, $[[REG1]], 1
-  %2 = load i8, i8* %0, align 1
+  %2 = load i8, ptr %0, align 1
   %3 = trunc i8 %2 to i1
   %4 = icmp ne i1 %3, true
   ret i1 %4

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll b/llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
index c059dbf415525..93865a9b5c12f 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
@@ -21,8 +21,8 @@ target triple = "mips--linux-gnu"
 ; Function Attrs: nounwind
 define void @cfoo() #0 {
 entry:
-  %0 = load i8, i8* @c2, align 1
-  store i8 %0, i8* @c1, align 1
+  %0 = load i8, ptr @c2, align 1
+  store i8 %0, ptr @c1, align 1
 ; CHECK-LABEL:	cfoo:
 ; CHECK:	lbu	$[[REGc:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:	sb	$[[REGc]], 0(${{[0-9]+}})
@@ -34,8 +34,8 @@ entry:
 ; Function Attrs: nounwind
 define void @sfoo() #0 {
 entry:
-  %0 = load i16, i16* @s2, align 2
-  store i16 %0, i16* @s1, align 2
+  %0 = load i16, ptr @s2, align 2
+  store i16 %0, ptr @s1, align 2
 ; CHECK-LABEL:	sfoo:
 ; CHECK:	lhu	$[[REGs:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:	sh	$[[REGs]], 0(${{[0-9]+}})
@@ -46,8 +46,8 @@ entry:
 ; Function Attrs: nounwind
 define void @ifoo() #0 {
 entry:
-  %0 = load i32, i32* @i2, align 4
-  store i32 %0, i32* @i1, align 4
+  %0 = load i32, ptr @i2, align 4
+  store i32 %0, ptr @i1, align 4
 ; CHECK-LABEL:	ifoo:
 ; CHECK:	lw	$[[REGi:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:	sw	$[[REGi]], 0(${{[0-9]+}})
@@ -58,8 +58,8 @@ entry:
 ; Function Attrs: nounwind
 define void @ffoo() #0 {
 entry:
-  %0 = load float, float* @f2, align 4
-  store float %0, float* @f1, align 4
+  %0 = load float, ptr @f2, align 4
+  store float %0, ptr @f1, align 4
 ; CHECK-LABEL:	ffoo:
 ; CHECK:	lwc1	$f[[REGf:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:	swc1	$f[[REGf]], 0(${{[0-9]+}})
@@ -71,8 +71,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dfoo() #0 {
 entry:
-  %0 = load double, double* @d2, align 8
-  store double %0, double* @d1, align 8
+  %0 = load double, ptr @d2, align 8
+  store double %0, ptr @d1, align 8
 ; CHECK-LABEL:        dfoo:
 ; CHECK:        ldc1    $f[[REGd:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:        sdc1    $f[[REGd]], 0(${{[0-9]+}})

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll b/llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
index 7e52fb40842a7..2a75b808cb7ea 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
@@ -28,14 +28,14 @@
 define void @_Z3b_iv()  {
 entry:
 ; CHECK-LABEL:   .ent  _Z3b_iv
-  %0 = load i8, i8* @b1, align 1
+  %0 = load i8, ptr @b1, align 1
   %tobool = trunc i8 %0 to i1
   %frombool = zext i1 %tobool to i8
-  store i8 %frombool, i8* @b2, align 1
-  %1 = load i8, i8* @b2, align 1
+  store i8 %frombool, ptr @b2, align 1
+  %1 = load i8, ptr @b2, align 1
   %tobool1 = trunc i8 %1 to i1
   %conv = zext i1 %tobool1 to i32
-  store i32 %conv, i32* @i, align 4
+  store i32 %conv, ptr @i, align 4
 ; CHECK:  lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:  andi  $[[REG2:[0-9]+]], $[[REG1]], 1
 ; CHECK:  sb  $[[REG2]], 0(${{[0-9]+}})
@@ -51,15 +51,15 @@ define void @_Z4uc_iv()  {
 entry:
 ; CHECK-LABEL:  .ent  _Z4uc_iv
 
-  %0 = load i8, i8* @uc1, align 1
+  %0 = load i8, ptr @uc1, align 1
   %conv = zext i8 %0 to i32
-  store i32 %conv, i32* @i, align 4
-  %1 = load i8, i8* @uc2, align 1
+  store i32 %conv, ptr @i, align 4
+  %1 = load i8, ptr @uc2, align 1
   %conv1 = zext i8 %1 to i32
 ; CHECK:   lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 255
 
-  store i32 %conv1, i32* @j, align 4
+  store i32 %conv1, ptr @j, align 4
   ret void
 ; CHECK:  .end  _Z4uc_iv
 
@@ -71,12 +71,12 @@ entry:
 ; mips32r2-LABEL:  .ent  _Z4sc_iv
 ; mips32-LABEL:  .ent  _Z4sc_iv
 
-  %0 = load i8, i8* @sc1, align 1
+  %0 = load i8, ptr @sc1, align 1
   %conv = sext i8 %0 to i32
-  store i32 %conv, i32* @i, align 4
-  %1 = load i8, i8* @sc2, align 1
+  store i32 %conv, ptr @i, align 4
+  %1 = load i8, ptr @sc2, align 1
   %conv1 = sext i8 %1 to i32
-  store i32 %conv1, i32* @j, align 4
+  store i32 %conv1, ptr @j, align 4
 ; mips32r2:  lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; mips32r2:  seb  ${{[0-9]+}}, $[[REG1]]
 ; mips32:  lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
@@ -91,12 +91,12 @@ entry:
 define void @_Z4us_iv()  {
 entry:
 ; CHECK-LABEL:  .ent  _Z4us_iv
-  %0 = load i16, i16* @us1, align 2
+  %0 = load i16, ptr @us1, align 2
   %conv = zext i16 %0 to i32
-  store i32 %conv, i32* @i, align 4
-  %1 = load i16, i16* @us2, align 2
+  store i32 %conv, ptr @i, align 4
+  %1 = load i16, ptr @us2, align 2
   %conv1 = zext i16 %1 to i32
-  store i32 %conv1, i32* @j, align 4
+  store i32 %conv1, ptr @j, align 4
   ret void
 ; CHECK:  lhu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 65535
@@ -109,12 +109,12 @@ entry:
 ; mips32r2-LABEL:  .ent  _Z4ss_iv
 ; mips32-LABEL:  .ent  _Z4ss_iv
 
-  %0 = load i16, i16* @ss1, align 2
+  %0 = load i16, ptr @ss1, align 2
   %conv = sext i16 %0 to i32
-  store i32 %conv, i32* @i, align 4
-  %1 = load i16, i16* @ss2, align 2
+  store i32 %conv, ptr @i, align 4
+  %1 = load i16, ptr @ss2, align 2
   %conv1 = sext i16 %1 to i32
-  store i32 %conv1, i32* @j, align 4
+  store i32 %conv1, ptr @j, align 4
 ; mips32r2:  lhu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; mips32r2:  seh  ${{[0-9]+}}, $[[REG1]]
 ; mips32:    lhu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
@@ -129,10 +129,10 @@ entry:
 define void @_Z4b_ssv()  {
 entry:
 ; CHECK-LABEL:  .ent  _Z4b_ssv
-  %0 = load i8, i8* @b2, align 1
+  %0 = load i8, ptr @b2, align 1
   %tobool = trunc i8 %0 to i1
   %conv = zext i1 %tobool to i16
-  store i16 %conv, i16* @ssi, align 2
+  store i16 %conv, ptr @ssi, align 2
   ret void
 ; CHECK:  lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 1
@@ -143,15 +143,15 @@ entry:
 define void @_Z5uc_ssv()  {
 entry:
 ; CHECK-LABEL:  .ent  _Z5uc_ssv
-  %0 = load i8, i8* @uc1, align 1
+  %0 = load i8, ptr @uc1, align 1
   %conv = zext i8 %0 to i16
-  store i16 %conv, i16* @ssi, align 2
-  %1 = load i8, i8* @uc2, align 1
+  store i16 %conv, ptr @ssi, align 2
+  %1 = load i8, ptr @uc2, align 1
   %conv1 = zext i8 %1 to i16
 ; CHECK:   lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK:  andi  ${{[0-9]+}}, $[[REG1]], 255
 
-  store i16 %conv1, i16* @ssj, align 2
+  store i16 %conv1, ptr @ssj, align 2
   ret void
 ; CHECK:  .end  _Z5uc_ssv
 }
@@ -161,12 +161,12 @@ define void @_Z5sc_ssv()  {
 entry:
 ; mips32r2-LABEL:  .ent  _Z5sc_ssv
 ; mips32-LABEL:  .ent  _Z5sc_ssv
-  %0 = load i8, i8* @sc1, align 1
+  %0 = load i8, ptr @sc1, align 1
   %conv = sext i8 %0 to i16
-  store i16 %conv, i16* @ssi, align 2
-  %1 = load i8, i8* @sc2, align 1
+  store i16 %conv, ptr @ssi, align 2
+  %1 = load i8, ptr @sc2, align 1
   %conv1 = sext i8 %1 to i16
-  store i16 %conv1, i16* @ssj, align 2
+  store i16 %conv1, ptr @ssj, align 2
 ; mips32r2:  lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; mips32r2:  seb  ${{[0-9]+}}, $[[REG1]]
 ; mips32:  lbu  $[[REG1:[0-9]+]], 0(${{[0-9]+}})

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/loadstrconst.ll b/llvm/test/CodeGen/Mips/Fast-ISel/loadstrconst.ll
index 072d15f434c0c..66b615bad4dc7 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/loadstrconst.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/loadstrconst.ll
@@ -4,12 +4,12 @@
 ; RUN:     < %s | FileCheck %s
 
 @.str = private unnamed_addr constant [6 x i8] c"hello\00", align 1
-@s = common global i8* null, align 4
+@s = common global ptr null, align 4
 
 ; Function Attrs: nounwind
 define void @foo() #0 {
 entry:
-  store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), i8** @s, align 4
+  store ptr @.str, ptr @s, align 4
   ret void
 ; CHECK:        .ent    foo
 ; CHECK:        lw      $[[REG1:[0-9]+]], %got($.str)(${{[0-9]+}})

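The loadstrconst.ll hunk above shows a second effect beyond the plain type rewrite: a constant getelementptr whose indices are all zero existed only to decay the array to a pointer to its first element, and with untyped pointers it folds away entirely. A hypothetical module mirroring the pattern (the names @.msg and @p are illustrative):

  @.msg = private unnamed_addr constant [6 x i8] c"hello\00", align 1
  @p = global ptr null, align 4

  define void @init() {
  entry:
    ; typed form was:
    ;   store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.msg, i32 0, i32 0), i8** @p
    store ptr @.msg, ptr @p, align 4
    ret void
  }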
diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/logopm.ll b/llvm/test/CodeGen/Mips/Fast-ISel/logopm.ll
index 65ca213235fe7..494dcd19a5b8a 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/logopm.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/logopm.ll
@@ -22,13 +22,13 @@
 ; Function Attrs: noinline nounwind
 define void @andUb() #0 {
 entry:
-  %0 = load i8, i8* @ub1, align 1
-  %1 = load i8, i8* @ub2, align 1
+  %0 = load i8, ptr @ub1, align 1
+  %1 = load i8, ptr @ub2, align 1
   %conv0 = trunc i8 %0 to i1
   %conv1 = trunc i8 %1 to i1
   %and0 = and i1 %conv1, %conv0
   %conv3 = zext i1 %and0 to i8
-  store i8 %conv3, i8* @ub, align 1, !tbaa !2
+  store i8 %conv3, ptr @ub, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    andUb
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -47,11 +47,11 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @andUb0() #0 {
 entry:
-  %0 = load i8, i8* @ub1, align 1, !tbaa !2
+  %0 = load i8, ptr @ub1, align 1, !tbaa !2
   %conv = trunc i8 %0 to i1
   %and = and i1 %conv, 0
   %conv1 = zext i1 %and to i8
-  store i8 %conv1, i8* @ub, align 1, !tbaa !2
+  store i8 %conv1, ptr @ub, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    andUb0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -70,11 +70,11 @@ entry:
 define void @andUb1() #0 {
 ; clang uses i8 constants for booleans, so we test with an i8 1.
 entry:
-  %x = load i8, i8* @ub1, align 1, !tbaa !2
+  %x = load i8, ptr @ub1, align 1, !tbaa !2
   %and = and i8 %x, 1
   %conv = trunc i8 %and to i1
   %conv1 = zext i1 %conv to i8
-  store i8 %conv1, i8* @ub, align 1, !tbaa !2
+  store i8 %conv1, ptr @ub, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    andUb1
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -93,13 +93,13 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @orUb() #0 {
 entry:
-  %0 = load i8, i8* @ub1, align 1
-  %1 = load i8, i8* @ub2, align 1
+  %0 = load i8, ptr @ub1, align 1
+  %1 = load i8, ptr @ub2, align 1
   %conv0 = trunc i8 %0 to i1
   %conv1 = trunc i8 %1 to i1
   %or0 = or i1 %conv1, %conv0
   %conv3 = zext i1 %or0 to i8
-  store i8 %conv3, i8* @ub, align 1, !tbaa !2
+  store i8 %conv3, ptr @ub, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    orUb
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -118,11 +118,11 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @orUb0() #0 {
 entry:
-  %0 = load i8, i8* @ub1, align 1, !tbaa !2
+  %0 = load i8, ptr @ub1, align 1, !tbaa !2
   %conv = trunc i8 %0 to i1
   %or = or i1 %conv, 0
   %conv1 = zext i1 %or to i8
-  store i8 %conv1, i8* @ub, align 1, !tbaa !2
+  store i8 %conv1, ptr @ub, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    orUb0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -139,11 +139,11 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @orUb1() #0 {
 entry:
-  %x = load i8, i8* @ub1, align 1, !tbaa !2
+  %x = load i8, ptr @ub1, align 1, !tbaa !2
   %or = or i8 %x, 1
   %conv = trunc i8 %or to i1
   %conv1 = zext i1 %conv to i8
-  store i8 %conv1, i8* @ub, align 1, !tbaa !2
+  store i8 %conv1, ptr @ub, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    orUb1
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -162,13 +162,13 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @xorUb() #0 {
 entry:
-  %0 = load i8, i8* @ub1, align 1
-  %1 = load i8, i8* @ub2, align 1
+  %0 = load i8, ptr @ub1, align 1
+  %1 = load i8, ptr @ub2, align 1
   %conv0 = trunc i8 %0 to i1
   %conv1 = trunc i8 %1 to i1
   %xor0 = xor i1 %conv1, %conv0
   %conv3 = zext i1 %xor0 to i8
-  store i8 %conv3, i8* @ub, align 1, !tbaa !2
+  store i8 %conv3, ptr @ub, align 1, !tbaa !2
 ; CHECK-LABEL: .ent    xorUb
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -187,11 +187,11 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @xorUb0() #0 {
 entry:
-  %0 = load i8, i8* @ub1, align 1, !tbaa !2
+  %0 = load i8, ptr @ub1, align 1, !tbaa !2
   %conv = trunc i8 %0 to i1
   %xor = xor i1 %conv, 0
   %conv1 = zext i1 %xor to i8
-  store i8 %conv1, i8* @ub, align 1, !tbaa !2
+  store i8 %conv1, ptr @ub, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    xorUb0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -209,11 +209,11 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @xorUb1() #0 {
 entry:
-  %x = load i8, i8* @ub1, align 1, !tbaa !2
+  %x = load i8, ptr @ub1, align 1, !tbaa !2
   %xor = xor i8 1, %x
   %conv = trunc i8 %xor to i1
   %conv1 = zext i1 %conv to i8
-  store i8 %conv1, i8* @ub, align 1, !tbaa !2
+  store i8 %conv1, ptr @ub, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    xorUb1
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -232,10 +232,10 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @andUc() #0 {
 entry:
-  %0 = load i8, i8* @uc1, align 1, !tbaa !2
-  %1 = load i8, i8* @uc2, align 1, !tbaa !2
+  %0 = load i8, ptr @uc1, align 1, !tbaa !2
+  %1 = load i8, ptr @uc2, align 1, !tbaa !2
   %and3 = and i8 %1, %0
-  store i8 %and3, i8* @uc, align 1, !tbaa !2
+  store i8 %and3, ptr @uc, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    andUc
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -253,9 +253,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @andUc0() #0 {
 entry:
-  %0 = load i8, i8* @uc1, align 1, !tbaa !2
+  %0 = load i8, ptr @uc1, align 1, !tbaa !2
   %and = and i8 %0, 67
-  store i8 %and, i8* @uc, align 1, !tbaa !2
+  store i8 %and, ptr @uc, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    andUc0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -273,9 +273,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @andUc1() #0 {
 entry:
-  %0 = load i8, i8* @uc1, align 1, !tbaa !2
+  %0 = load i8, ptr @uc1, align 1, !tbaa !2
   %and = and i8 %0, 167
-  store i8 %and, i8* @uc, align 1, !tbaa !2
+  store i8 %and, ptr @uc, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    andUc1
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -293,10 +293,10 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @orUc() #0 {
 entry:
-  %0 = load i8, i8* @uc1, align 1, !tbaa !2
-  %1 = load i8, i8* @uc2, align 1, !tbaa !2
+  %0 = load i8, ptr @uc1, align 1, !tbaa !2
+  %1 = load i8, ptr @uc2, align 1, !tbaa !2
   %or3 = or i8 %1, %0
-  store i8 %or3, i8* @uc, align 1, !tbaa !2
+  store i8 %or3, ptr @uc, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    orUc
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -315,9 +315,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @orUc0() #0 {
 entry:
-  %0 = load i8, i8* @uc1, align 1, !tbaa !2
+  %0 = load i8, ptr @uc1, align 1, !tbaa !2
    %or = or i8 %0, 69
-  store i8 %or, i8* @uc, align 1, !tbaa !2
+  store i8 %or, ptr @uc, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    orUc0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -335,9 +335,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @orUc1() #0 {
 entry:
-  %0 = load i8, i8* @uc1, align 1, !tbaa !2
+  %0 = load i8, ptr @uc1, align 1, !tbaa !2
   %or = or i8 %0, 238
-  store i8 %or, i8* @uc, align 1, !tbaa !2
+  store i8 %or, ptr @uc, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    orUc1
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -355,10 +355,10 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @xorUc() #0 {
 entry:
-  %0 = load i8, i8* @uc1, align 1, !tbaa !2
-  %1 = load i8, i8* @uc2, align 1, !tbaa !2
+  %0 = load i8, ptr @uc1, align 1, !tbaa !2
+  %1 = load i8, ptr @uc2, align 1, !tbaa !2
   %xor3 = xor i8 %1, %0
-  store i8 %xor3, i8* @uc, align 1, !tbaa !2
+  store i8 %xor3, ptr @uc, align 1, !tbaa !2
 ; CHECK-LABEL: .ent    xorUc
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -377,9 +377,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @xorUc0() #0 {
 entry:
-  %0 = load i8, i8* @uc1, align 1, !tbaa !2
+  %0 = load i8, ptr @uc1, align 1, !tbaa !2
   %xor = xor i8 %0, 23
-  store i8 %xor, i8* @uc, align 1, !tbaa !2
+  store i8 %xor, ptr @uc, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    xorUc0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -397,9 +397,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @xorUc1() #0 {
 entry:
-  %0 = load i8, i8* @uc1, align 1, !tbaa !2
+  %0 = load i8, ptr @uc1, align 1, !tbaa !2
   %xor = xor i8 %0, 120
-  store i8 %xor, i8* @uc, align 1, !tbaa !2
+  store i8 %xor, ptr @uc, align 1, !tbaa !2
 ; CHECK-LABEL:  .ent    xorUc1
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -417,10 +417,10 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @andUs() #0 {
 entry:
-  %0 = load i16, i16* @us1, align 2, !tbaa !5
-  %1 = load i16, i16* @us2, align 2, !tbaa !5
+  %0 = load i16, ptr @us1, align 2, !tbaa !5
+  %1 = load i16, ptr @us2, align 2, !tbaa !5
   %and3 = and i16 %1, %0
-  store i16 %and3, i16* @us, align 2, !tbaa !5
+  store i16 %and3, ptr @us, align 2, !tbaa !5
 ; CHECK-LABEL:  .ent    andUs
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -439,9 +439,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @andUs0() #0 {
 entry:
-  %0 = load i16, i16* @us1, align 2, !tbaa !5
+  %0 = load i16, ptr @us1, align 2, !tbaa !5
   %and = and i16 %0, 4660
-  store i16 %and, i16* @us, align 2, !tbaa !5
+  store i16 %and, ptr @us, align 2, !tbaa !5
 ; CHECK-LABEL: .ent    andUs0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -459,9 +459,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @andUs1() #0 {
 entry:
-  %0 = load i16, i16* @us1, align 2, !tbaa !5
+  %0 = load i16, ptr @us1, align 2, !tbaa !5
   %and = and i16 %0, 61351
-  store i16 %and, i16* @us, align 2, !tbaa !5
+  store i16 %and, ptr @us, align 2, !tbaa !5
 ; CHECK-LABEL:  .ent    andUs1
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -479,10 +479,10 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @orUs() #0 {
 entry:
-  %0 = load i16, i16* @us1, align 2, !tbaa !5
-  %1 = load i16, i16* @us2, align 2, !tbaa !5
+  %0 = load i16, ptr @us1, align 2, !tbaa !5
+  %1 = load i16, ptr @us2, align 2, !tbaa !5
   %or3 = or i16 %1, %0
-  store i16 %or3, i16* @us, align 2, !tbaa !5
+  store i16 %or3, ptr @us, align 2, !tbaa !5
 ; CHECK-LABEL:  .ent    orUs
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -501,18 +501,18 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @orUs0() #0 {
 entry:
-  %0 = load i16, i16* @us1, align 2, !tbaa !5
+  %0 = load i16, ptr @us1, align 2, !tbaa !5
   %or = or i16 %0, 17666
-  store i16 %or, i16* @us, align 2, !tbaa !5
+  store i16 %or, ptr @us, align 2, !tbaa !5
   ret void
 }
 
 ; Function Attrs: noinline nounwind
 define void @orUs1() #0 {
 entry:
-  %0 = load i16, i16* @us1, align 2, !tbaa !5
+  %0 = load i16, ptr @us1, align 2, !tbaa !5
   %or = or i16 %0, 60945
-  store i16 %or, i16* @us, align 2, !tbaa !5
+  store i16 %or, ptr @us, align 2, !tbaa !5
 ; CHECK-LABEL:  .ent    orUs1
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -530,10 +530,10 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @xorUs() #0 {
 entry:
-  %0 = load i16, i16* @us1, align 2, !tbaa !5
-  %1 = load i16, i16* @us2, align 2, !tbaa !5
+  %0 = load i16, ptr @us1, align 2, !tbaa !5
+  %1 = load i16, ptr @us2, align 2, !tbaa !5
   %xor3 = xor i16 %1, %0
-  store i16 %xor3, i16* @us, align 2, !tbaa !5
+  store i16 %xor3, ptr @us, align 2, !tbaa !5
 ; CHECK-LABEL:  .ent    xorUs
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -552,9 +552,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @xorUs0() #0 {
 entry:
-  %0 = load i16, i16* @us1, align 2, !tbaa !5
+  %0 = load i16, ptr @us1, align 2, !tbaa !5
   %xor = xor i16 %0, 6062
-  store i16 %xor, i16* @us, align 2, !tbaa !5
+  store i16 %xor, ptr @us, align 2, !tbaa !5
 ; CHECK-LABEL:  .ent    xorUs0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -573,9 +573,9 @@ entry:
 ; Function Attrs: noinline nounwind
 define void @xorUs1() #0 {
 entry:
-  %0 = load i16, i16* @us1, align 2, !tbaa !5
+  %0 = load i16, ptr @us1, align 2, !tbaa !5
   %xor = xor i16 %0, 60024
-  store i16 %xor, i16* @us, align 2, !tbaa !5
+  store i16 %xor, ptr @us, align 2, !tbaa !5
 ; CHECK-LABEL:  .ent    xorUs1
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/memtest1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/memtest1.ll
index 74acbf7098720..5dd625f9279bb 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/memtest1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/memtest1.ll
@@ -4,15 +4,15 @@
 ; RUN:     -fast-isel-abort=3 -verify-machineinstrs | FileCheck %s
 
 @str = private unnamed_addr constant [12 x i8] c"hello there\00", align 1
-@src = global i8* getelementptr inbounds ([12 x i8], [12 x i8]* @str, i32 0, i32 0), align 4
+@src = global ptr @str, align 4
 @i = global i32 12, align 4
 @dest = common global [50 x i8] zeroinitializer, align 1
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
-declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1)
-declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
+declare void @llvm.memmove.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1)
 
-define void @cpy(i8* %src, i32 %i) {
+define void @cpy(ptr %src, i32 %i) {
   ; CHECK-LABEL:  cpy:
 
   ; CHECK:        lw    $[[T0:[0-9]+]], %got(dest)(${{[0-9]+}})
@@ -20,11 +20,11 @@ define void @cpy(i8* %src, i32 %i) {
   ; CHECK:        jalr  $[[T2]]
   ; CHECK-NEXT:       nop
   ; CHECK-NOT:        {{.*}}$2{{.*}}
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), i8* %src, i32 %i, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr @dest, ptr %src, i32 %i, i1 false)
   ret void
 }
 
-define void @mov(i8* %src, i32 %i) {
+define void @mov(ptr %src, i32 %i) {
   ; CHECK-LABEL:  mov:
 
   ; CHECK:        lw    $[[T0:[0-9]+]], %got(dest)(${{[0-9]+}})
@@ -32,7 +32,7 @@ define void @mov(i8* %src, i32 %i) {
   ; CHECK:            jalr  $[[T2]]
   ; CHECK-NEXT:       nop
   ; CHECK-NOT:        {{.*}}$2{{.*}}
-  call void @llvm.memmove.p0i8.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), i8* %src, i32 %i, i1 false)
+  call void @llvm.memmove.p0.p0.i32(ptr @dest, ptr %src, i32 %i, i1 false)
   ret void
 }
 
@@ -44,6 +44,6 @@ define void @clear(i32 %i) {
   ; CHECK:            jalr  $[[T2]]
   ; CHECK-NEXT:       nop
   ; CHECK-NOT:        {{.*}}$2{{.*}}
-  call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @dest, i32 0, i32 0), i8 42, i32 %i, i1 false)
+  call void @llvm.memset.p0.i32(ptr @dest, i8 42, i32 %i, i1 false)
   ret void
 }

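Note that the memory intrinsics in memtest1.ll change name as well as signature: the overload suffix of llvm.memcpy and friends encodes the pointer parameter types, and once pointee types are gone only the address space is left, so p0i8 (pointer to i8 in address space 0) shortens to p0. A self-contained sketch of a call through the renamed intrinsic (the wrapper @copy4 is hypothetical):

  declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1 immarg)

  define void @copy4(ptr %dst, ptr %src) {
  entry:
    call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dst, ptr align 1 %src, i32 4, i1 false)
    ret void
  }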
diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll b/llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll
index 6ee50fe3fe99a..2ed6117dd7338 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll
@@ -4,7 +4,7 @@
 ; RUN:     < %s | FileCheck %s
 
 @x = common global [128000 x float] zeroinitializer, align 4
-@y = global float* getelementptr inbounds ([128000 x float], [128000 x float]* @x, i32 0, i32 0), align 4
+@y = global ptr @x, align 4
 @result = common global float 0.000000e+00, align 4
 @.str = private unnamed_addr constant [5 x i8] c"%f \0A\00", align 1
 
@@ -12,9 +12,9 @@
 define void @foo() {
 entry:
 ; CHECK-LABEL:   .ent  foo
-  %0 = load float*, float** @y, align 4
-  %arrayidx = getelementptr inbounds float, float* %0, i32 64000
-  store float 5.500000e+00, float* %arrayidx, align 4
+  %0 = load ptr, ptr @y, align 4
+  %arrayidx = getelementptr inbounds float, ptr %0, i32 64000
+  store float 5.500000e+00, ptr %arrayidx, align 4
 ; CHECK:        lw      $[[REG_Y_GOT:[0-9]+]], %got(y)(${{[0-9]+}})
 ; CHECK:        lw      $[[REG_Y:[0-9]+]], 0($[[REG_Y_GOT]])
 ; CHECK:        lui     $[[REG_FPCONST_INT:[0-9]+]], 16560
@@ -31,10 +31,10 @@ entry:
 define void @goo() {
 entry:
 ; CHECK-LABEL:   .ent  goo
-  %0 = load float*, float** @y, align 4
-  %arrayidx = getelementptr inbounds float, float* %0, i32 64000
-  %1 = load float, float* %arrayidx, align 4
-  store float %1, float* @result, align 4
+  %0 = load ptr, ptr @y, align 4
+  %arrayidx = getelementptr inbounds float, ptr %0, i32 64000
+  %1 = load float, ptr %arrayidx, align 4
+  store float %1, ptr @result, align 4
 ; CHECK-DAG:    lw      $[[REG_RESULT:[0-9]+]], %got(result)(${{[0-9]+}})
 ; CHECK-DAG:    lw      $[[REG_Y_GOT:[0-9]+]], %got(y)(${{[0-9]+}})
 ; CHECK-DAG:    lw      $[[REG_Y:[0-9]+]], 0($[[REG_Y_GOT]])
@@ -51,7 +51,7 @@ entry:
 ; Original C code for test.
 ;
 ;float x[128000];
-;float *y = x;
+;ptr y = x;
 ;float result;
 
 

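Two details are worth separating in the overflt.ll hunks: the initializer of @y loses its array-decay getelementptr, as in loadstrconst.ll above, but getelementptr instructions keep their explicit source element type, because that type, not the pointer, determines the offset scaling. A small sketch under the hypothetical names @arr and @q:

  @arr = global [8 x float] zeroinitializer, align 4
  @q = global ptr @arr, align 4

  define float @at(i32 %i) {
  entry:
    %base = load ptr, ptr @q, align 4
    ; "float" here still scales %i by 4 bytes; only the pointer type is opaque
    %slot = getelementptr inbounds float, ptr %base, i32 %i
    %v = load float, ptr %slot, align 4
    ret float %v
  }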
diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/pr40325.ll b/llvm/test/CodeGen/Mips/Fast-ISel/pr40325.ll
index e482a13f3d5cb..9e64d7b2fa039 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/pr40325.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/pr40325.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=mipsel -relocation-model=pic -O0 -mcpu=mips32 < %s | FileCheck %s
 
-define void @test(i32 %x, i1* %p) nounwind {
+define void @test(i32 %x, ptr %p) nounwind {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    move $1, $4
@@ -16,7 +16,7 @@ define void @test(i32 %x, i1* %p) nounwind {
 ; CHECK-NEXT:    nop
   %y = and i32 %x, 1
   %c = icmp eq i32 %y, 1
-  store i1 %c, i1* %p
+  store i1 %c, ptr %p
   br i1 %c, label %foo, label %foo
 
 foo:

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/rem1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/rem1.ll
index c8524a5b81e12..cc0fc47d1b874 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/rem1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/rem1.ll
@@ -25,10 +25,10 @@ define void @rems() {
   ; CHECK-DAG:        teq     $[[K]], $zero, 7
   ; CHECK-DAG:        mfhi    $[[RESULT:[0-9]+]]
   ; CHECK:            sw      $[[RESULT]], 0($[[I_ADDR]])
-  %1 = load i32, i32* @sj, align 4
-  %2 = load i32, i32* @sk, align 4
+  %1 = load i32, ptr @sj, align 4
+  %2 = load i32, ptr @sk, align 4
   %rem = srem i32 %1, %2
-  store i32 %rem, i32* @si, align 4
+  store i32 %rem, ptr @si, align 4
   ret void
 }
 
@@ -48,9 +48,9 @@ define void @remu() {
   ; CHECK-DAG:        teq     $[[K]], $zero, 7
   ; CHECK-DAG:        mfhi    $[[RESULT:[0-9]+]]
   ; CHECK:            sw      $[[RESULT]], 0($[[I_ADDR]])
-  %1 = load i32, i32* @uj, align 4
-  %2 = load i32, i32* @uk, align 4
+  %1 = load i32, ptr @uj, align 4
+  %2 = load i32, ptr @uk, align 4
   %rem = urem i32 %1, %2
-  store i32 %rem, i32* @ui, align 4
+  store i32 %rem, ptr @ui, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll b/llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll
index d8d1222d3e3e0..c8023e97d5936 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll
@@ -11,7 +11,7 @@
 define i32 @reti() {
 entry:
 ; CHECK-LABEL: reti:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   ret i32 %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -25,7 +25,7 @@ entry:
 define i16 @retus() {
 entry:
 ; CHECK-LABEL: retus:
-  %0 = load i16, i16* @s, align 2
+  %0 = load i16, ptr @s, align 2
   ret i16 %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -39,7 +39,7 @@ entry:
 define signext i16 @rets() {
 entry:
 ; CHECK-LABEL: rets:
-  %0 = load i16, i16* @s, align 2
+  %0 = load i16, ptr @s, align 2
   ret i16 %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -54,7 +54,7 @@ entry:
 define i8 @retuc() {
 entry:
 ; CHECK-LABEL: retuc:
-  %0 = load i8, i8* @c, align 1
+  %0 = load i8, ptr @c, align 1
   ret i8 %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -68,7 +68,7 @@ entry:
 define signext i8 @retc() {
 entry:
 ; CHECK-LABEL: retc:
-  %0 = load i8, i8* @c, align 1
+  %0 = load i8, ptr @c, align 1
   ret i8 %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -83,7 +83,7 @@ entry:
 define float @retf() {
 entry:
 ; CHECK-LABEL: retf:
-  %0 = load float, float* @f, align 4
+  %0 = load float, ptr @f, align 4
   ret float %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -97,7 +97,7 @@ entry:
 define double @retd() {
 entry:
 ; CHECK-LABEL: retd:
-  %0 = load double, double* @d, align 8
+  %0 = load double, ptr @d, align 8
   ret double %0
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK:        addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/shftopm.ll b/llvm/test/CodeGen/Mips/Fast-ISel/shftopm.ll
index 871accfff2fef..923ccaede29d5 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/shftopm.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/shftopm.ll
@@ -12,10 +12,10 @@
 
 define void @sll() {
 entry:
-  %0 = load i16, i16* @s1, align 2
-  %1 = load i16, i16* @s2, align 2
+  %0 = load i16, ptr @s1, align 2
+  %1 = load i16, ptr @s2, align 2
   %shl = shl i16 %0, %1
-  store i16 %shl, i16* @s3, align 2
+  store i16 %shl, ptr @s3, align 2
 ; CHECK-LABEL:  sll:
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK-DAG:    addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -32,9 +32,9 @@ entry:
 
 define void @slli() {
 entry:
-  %0 = load i16, i16* @s1, align 2
+  %0 = load i16, ptr @s1, align 2
   %shl = shl i16 %0, 5
-  store i16 %shl, i16* @s3, align 2
+  store i16 %shl, ptr @s3, align 2
 ; CHECK-LABEL:  slli:
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK-DAG:    addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -49,10 +49,10 @@ entry:
 
 define void @srl() {
 entry:
-  %0 = load i16, i16* @us1, align 2
-  %1 = load i16, i16* @us2, align 2
+  %0 = load i16, ptr @us1, align 2
+  %1 = load i16, ptr @us2, align 2
   %shr = lshr i16 %0, %1
-  store i16 %shr, i16* @us3, align 2
+  store i16 %shr, ptr @us3, align 2
   ret void
 ; CHECK-LABEL:  srl:
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
@@ -69,9 +69,9 @@ entry:
 
 define void @srli() {
 entry:
-  %0 = load i16, i16* @us1, align 2
+  %0 = load i16, ptr @us1, align 2
   %shr = lshr i16 %0, 4
-  store i16 %shr, i16* @us3, align 2
+  store i16 %shr, ptr @us3, align 2
 ; CHECK-LABEL:  srli:
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK-DAG:    addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -86,10 +86,10 @@ entry:
 
 define void @sra() {
 entry:
-  %0 = load i16, i16* @s1, align 2
-  %1 = load i16, i16* @s2, align 2
+  %0 = load i16, ptr @s1, align 2
+  %1 = load i16, ptr @s2, align 2
   %shr = ashr i16 %0, %1
-  store i16 %shr, i16* @s3, align 2
+  store i16 %shr, ptr @s3, align 2
 ; CHECK-LABEL:  sra:
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK-DAG:    addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -106,9 +106,9 @@ entry:
 
 define void @srai() {
 entry:
-  %0 = load i16, i16* @s1, align 2
+  %0 = load i16, ptr @s1, align 2
   %shr = ashr i16 %0, 2
-  store i16 %shr, i16* @s3, align 2
+  store i16 %shr, ptr @s3, align 2
 ; CHECK-LABEL:  srai:
 ; CHECK:        lui     $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK-DAG:    addiu   $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/shift.ll b/llvm/test/CodeGen/Mips/Fast-ISel/shift.ll
index 651fb6ad1f5ac..e3edf245e9816 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/shift.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/shift.ll
@@ -8,8 +8,7 @@
 define i32 @main() nounwind uwtable {
 entry:
   %foo = alloca %struct.s, align 4
-  %0 = bitcast %struct.s* %foo to i32*
-  %bf.load = load i32, i32* %0, align 4
+  %bf.load = load i32, ptr %foo, align 4
   %bf.lshr = lshr i32 %bf.load, 2
   %cmp = icmp ne i32 %bf.lshr, 2
   br i1 %cmp, label %if.then, label %if.end

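The shift.ll hunk is the one place in this batch where an instruction disappears rather than being retyped: with a single untyped pointer type, the bitcast from %struct.s* to i32* is a no-op, so the load takes the alloca directly. A minimal sketch with a hypothetical one-field %struct.s:

  %struct.s = type { i32 }

  define i32 @first_word() {
  entry:
    %obj = alloca %struct.s, align 4
    store i32 7, ptr %obj, align 4
    ; typed form needed: %0 = bitcast %struct.s* %obj to i32*
    ;                    %w = load i32, i32* %0
    %w = load i32, ptr %obj, align 4
    ret i32 %w
  }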
diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/simplestore.ll b/llvm/test/CodeGen/Mips/Fast-ISel/simplestore.ll
index 1379390039a7c..d029ea4973b41 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/simplestore.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/simplestore.ll
@@ -8,7 +8,7 @@
 ; Function Attrs: nounwind
 define void @foo()  {
 entry:
-  store i32 12345, i32* @abcd, align 4
+  store i32 12345, ptr @abcd, align 4
 ; CHECK: 	addiu	$[[REG1:[0-9]+]], $zero, 12345
 ; CHECK: 	lw	$[[REG2:[0-9]+]], %got(abcd)(${{[0-9]+}})
 ; CHECK: 	sw	$[[REG1]], 0($[[REG2]])

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
index d1df4569c4756..1957453f55068 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
@@ -13,7 +13,7 @@
 ; Function Attrs: nounwind
 define void @f1() #0 {
 entry:
-  store float 0x3FFA76C8C0000000, float* @f, align 4
+  store float 0x3FFA76C8C0000000, ptr @f, align 4
   ret void
 ; CHECK:  .ent  f1
 ; CHECK:  lui  $[[REG1:[0-9]+]], 16339
@@ -28,7 +28,7 @@ entry:
 ; Function Attrs: nounwind
 define void @d1() #0 {
 entry:
-  store double 1.234567e+00, double* @de, align 8
+  store double 1.234567e+00, ptr @de, align 8
 ; mip32r2:  .ent  d1
 ; mips32r2:  lui  $[[REG1a:[0-9]+]], 16371
 ; mips32r2:  ori  $[[REG2a:[0-9]+]], $[[REG1a]], 49353

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/simplestorei.ll b/llvm/test/CodeGen/Mips/Fast-ISel/simplestorei.ll
index 1a128bb184b92..2f73d1cb890c0 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/simplestorei.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/simplestorei.ll
@@ -8,7 +8,7 @@
 ; Function Attrs: nounwind
 define void @si2_1() #0 {
 entry:
-  store i32 32767, i32* @ijk, align 4
+  store i32 32767, ptr @ijk, align 4
 ; CHECK:        .ent    si2_1
 ; CHECK:        addiu   $[[REG1:[0-9]+]], $zero, 32767
 ; CHECK:        lw      $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
@@ -20,7 +20,7 @@ entry:
 ; Function Attrs: nounwind
 define void @si2_2() #0 {
 entry:
-  store i32 -32768, i32* @ijk, align 4
+  store i32 -32768, ptr @ijk, align 4
 ; CHECK:        .ent    si2_2
 ; CHECK:        lui     $[[REG1:[0-9]+]], 65535
 ; CHECK:        ori     $[[REG2:[0-9]+]], $[[REG1]], 32768
@@ -32,7 +32,7 @@ entry:
 ; Function Attrs: nounwind
 define void @ui2_1() #0 {
 entry:
-  store i32 65535, i32* @ijk, align 4
+  store i32 65535, ptr @ijk, align 4
 ; CHECK:        .ent    ui2_1
 ; CHECK:        ori     $[[REG1:[0-9]+]], $zero, 65535
 ; CHECK:        lw      $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
@@ -43,7 +43,7 @@ entry:
 ; Function Attrs: nounwind
 define void @ui4_1() #0 {
 entry:
-  store i32 983040, i32* @ijk, align 4
+  store i32 983040, ptr @ijk, align 4
 ; CHECK:        .ent    ui4_1
 ; CHECK:        lui     $[[REG1:[0-9]+]], 15
 ; CHECK:        lw      $[[REG2:[0-9]+]], %got(ijk)(${{[0-9]+}})
@@ -54,7 +54,7 @@ entry:
 ; Function Attrs: nounwind
 define void @ui4_2() #0 {
 entry:
-  store i32 719566, i32* @ijk, align 4
+  store i32 719566, ptr @ijk, align 4
 ; CHECK:        .ent    ui4_2
 ; CHECK:        lui	$[[REG1:[0-9]+]], 10
 ; CHECK: 	ori	$[[REG1]], $[[REG1]], 64206

diff  --git a/llvm/test/CodeGen/Mips/Fast-ISel/stackloadstore.ll b/llvm/test/CodeGen/Mips/Fast-ISel/stackloadstore.ll
index 982284b64f663..f0974d580fe70 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/stackloadstore.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/stackloadstore.ll
@@ -5,14 +5,14 @@
 
 define i16 @test() {
   %a = alloca [4 x i16], align 4
-  %arrayidx = getelementptr inbounds [4 x i16], [4 x i16]* %a, i32 0, i32 -2
-  %b = load i16, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds [4 x i16], ptr %a, i32 0, i32 -2
+  %b = load i16, ptr %arrayidx, align 2
   ret i16 %b
 }
 
 define void @test2() {
   %a = alloca [4 x i16], align 4
-  %arrayidx = getelementptr inbounds [4 x i16], [4 x i16]* %a, i32 0, i32 -2
-  store i16 2, i16* %arrayidx, align 2
+  %arrayidx = getelementptr inbounds [4 x i16], ptr %a, i32 0, i32 -2
+  store i16 2, ptr %arrayidx, align 2
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll
index d2000899b9c7d..aef30bbb717c4 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/aggregate_struct_return.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 
-define { float, float } @add_complex_float({ float, float }* %a, { float, float }* %b) {
+define { float, float } @add_complex_float(ptr %a, ptr %b) {
   ; MIPS32-LABEL: name: add_complex_float
   ; MIPS32: bb.1.entry:
   ; MIPS32:   liveins: $a0, $a1
@@ -23,14 +23,14 @@ define { float, float } @add_complex_float({ float, float }* %a, { float, float
   ; MIPS32:   $f2 = COPY [[FADD1]](s32)
   ; MIPS32:   RetRA implicit $f0, implicit $f2
 entry:
-  %.realp = getelementptr inbounds { float, float }, { float, float }* %a, i32 0, i32 0
-  %.real = load float, float* %.realp, align 4
-  %.imagp = getelementptr inbounds { float, float }, { float, float }* %a, i32 0, i32 1
-  %.imag = load float, float* %.imagp, align 4
-  %.realp1 = getelementptr inbounds { float, float }, { float, float }* %b, i32 0, i32 0
-  %.real2 = load float, float* %.realp1, align 4
-  %.imagp3 = getelementptr inbounds { float, float }, { float, float }* %b, i32 0, i32 1
-  %.imag4 = load float, float* %.imagp3, align 4
+  %.realp = getelementptr inbounds { float, float }, ptr %a, i32 0, i32 0
+  %.real = load float, ptr %.realp, align 4
+  %.imagp = getelementptr inbounds { float, float }, ptr %a, i32 0, i32 1
+  %.imag = load float, ptr %.imagp, align 4
+  %.realp1 = getelementptr inbounds { float, float }, ptr %b, i32 0, i32 0
+  %.real2 = load float, ptr %.realp1, align 4
+  %.imagp3 = getelementptr inbounds { float, float }, ptr %b, i32 0, i32 1
+  %.imag4 = load float, ptr %.imagp3, align 4
   %add.r = fadd float %.real, %.real2
   %add.i = fadd float %.imag, %.imag4
   %.fca.0.insert = insertvalue { float, float } undef, float %add.r, 0
@@ -38,7 +38,7 @@ entry:
   ret { float, float } %.fca.1.insert
 }
 
-define { double, double } @add_complex_double({ double, double }* %a, { double, double }* %b) {
+define { double, double } @add_complex_double(ptr %a, ptr %b) {
   ; MIPS32-LABEL: name: add_complex_double
   ; MIPS32: bb.1.entry:
   ; MIPS32:   liveins: $a0, $a1
@@ -60,14 +60,14 @@ define { double, double } @add_complex_double({ double, double }* %a, { double,
   ; MIPS32:   $d1 = COPY [[FADD1]](s64)
   ; MIPS32:   RetRA implicit $d0, implicit $d1
 entry:
-  %.realp = getelementptr inbounds { double, double }, { double, double }* %a, i32 0, i32 0
-  %.real = load double, double* %.realp, align 8
-  %.imagp = getelementptr inbounds { double, double }, { double, double }* %a, i32 0, i32 1
-  %.imag = load double, double* %.imagp, align 8
-  %.realp1 = getelementptr inbounds { double, double }, { double, double }* %b, i32 0, i32 0
-  %.real2 = load double, double* %.realp1, align 8
-  %.imagp3 = getelementptr inbounds { double, double }, { double, double }* %b, i32 0, i32 1
-  %.imag4 = load double, double* %.imagp3, align 8
+  %.realp = getelementptr inbounds { double, double }, ptr %a, i32 0, i32 0
+  %.real = load double, ptr %.realp, align 8
+  %.imagp = getelementptr inbounds { double, double }, ptr %a, i32 0, i32 1
+  %.imag = load double, ptr %.imagp, align 8
+  %.realp1 = getelementptr inbounds { double, double }, ptr %b, i32 0, i32 0
+  %.real2 = load double, ptr %.realp1, align 8
+  %.imagp3 = getelementptr inbounds { double, double }, ptr %b, i32 0, i32 1
+  %.imag4 = load double, ptr %.imagp3, align 8
   %add.r = fadd double %.real, %.real2
   %add.i = fadd double %.imag, %.imag4
   %.fca.0.insert = insertvalue { double, double } undef, double %add.r, 0
@@ -76,7 +76,7 @@ entry:
 }
 
 declare { float, float } @ret_complex_float()
-define void @call_ret_complex_float({ float, float }* %z) {
+define void @call_ret_complex_float(ptr %z) {
   ; MIPS32-LABEL: name: call_ret_complex_float
   ; MIPS32: bb.1.entry:
   ; MIPS32:   liveins: $a0
@@ -96,15 +96,15 @@ entry:
   %call = call { float, float } @ret_complex_float()
   %0 = extractvalue { float, float } %call, 0
   %1 = extractvalue { float, float } %call, 1
-  %.realp = getelementptr inbounds { float, float }, { float, float }* %z, i32 0, i32 0
-  %.imagp = getelementptr inbounds { float, float }, { float, float }* %z, i32 0, i32 1
-  store float %0, float* %.realp, align 4
-  store float %1, float* %.imagp, align 4
+  %.realp = getelementptr inbounds { float, float }, ptr %z, i32 0, i32 0
+  %.imagp = getelementptr inbounds { float, float }, ptr %z, i32 0, i32 1
+  store float %0, ptr %.realp, align 4
+  store float %1, ptr %.imagp, align 4
   ret void
 }
 
 declare { double, double } @ret_complex_double()
-define void @call_ret_complex_double({ double, double }* %z) {
+define void @call_ret_complex_double(ptr %z) {
   ; MIPS32-LABEL: name: call_ret_complex_double
   ; MIPS32: bb.1.entry:
   ; MIPS32:   liveins: $a0
@@ -124,9 +124,9 @@ entry:
   %call = call { double, double } @ret_complex_double()
   %0 = extractvalue { double, double } %call, 0
   %1 = extractvalue { double, double } %call, 1
-  %.realp = getelementptr inbounds { double, double }, { double, double }* %z, i32 0, i32 0
-  %.imagp = getelementptr inbounds { double, double }, { double, double }* %z, i32 0, i32 1
-  store double %0, double* %.realp, align 8
-  store double %1, double* %.imagp, align 8
+  %.realp = getelementptr inbounds { double, double }, ptr %z, i32 0, i32 0
+  %.imagp = getelementptr inbounds { double, double }, ptr %z, i32 0, i32 1
+  store double %0, ptr %.realp, align 8
+  store double %1, ptr %.imagp, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll
index a020c25d9707d..7780e532df4e8 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/call.ll
@@ -110,7 +110,7 @@ entry:
   ret i32 %doublez
 }
 
-define i32 @call_reg(i32 (i32, i32)* %f_ptr, i32 %x, i32 %y) {
+define i32 @call_reg(ptr %f_ptr, i32 %x, i32 %y) {
   ; MIPS32-LABEL: name: call_reg
   ; MIPS32: bb.1.entry:
   ; MIPS32:   liveins: $a0, $a1, $a2
@@ -144,9 +144,9 @@ entry:
   ret i32 %call
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1 immarg)
 
-define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 signext %length) {
+define void @call_symbol(ptr nocapture readonly %src, ptr nocapture %dest, i32 signext %length) {
   ; MIPS32-LABEL: name: call_symbol
   ; MIPS32: bb.1.entry:
   ; MIPS32:   liveins: $a0, $a1, $a2
@@ -164,7 +164,7 @@ define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 s
   ; MIPS32_PIC:   G_MEMCPY [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32), 0 :: (store (s8) into %ir.dest), (load (s8) from %ir.src)
   ; MIPS32_PIC:   RetRA
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 %length, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 %length, i1 false)
   ret void
 }
 

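In call.ll the function-pointer parameter i32 (i32, i32)* %f_ptr collapses to a bare ptr as well; this stays well-typed because a call instruction carries the callee's function type itself. A sketch with a hypothetical wrapper @call_through:

  define i32 @call_through(ptr %f, i32 %x, i32 %y) {
  entry:
    ; the call supplies the function type; %f itself is untyped
    %r = call i32 %f(i32 %x, i32 %y)
    ret i32 %r
  }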
diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/pointers.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/pointers.ll
index 23bd1c3d6d69e..60bd5925cf2ab 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/pointers.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/pointers.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 
 
-define i32 @ptr_arg_in_regs(i32* %p) {
+define i32 @ptr_arg_in_regs(ptr %p) {
   ; MIPS32-LABEL: name: ptr_arg_in_regs
   ; MIPS32: bb.1.entry:
   ; MIPS32:   liveins: $a0
@@ -11,11 +11,11 @@ define i32 @ptr_arg_in_regs(i32* %p) {
   ; MIPS32:   $v0 = COPY [[LOAD]](s32)
   ; MIPS32:   RetRA implicit $v0
 entry:
-  %0 = load i32, i32* %p
+  %0 = load i32, ptr %p
   ret i32 %0
 }
 
-define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {
+define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {
   ; MIPS32-LABEL: name: ptr_arg_on_stack
   ; MIPS32: bb.1.entry:
   ; MIPS32:   liveins: $a0, $a1, $a2, $a3
@@ -29,11 +29,11 @@ define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {
   ; MIPS32:   $v0 = COPY [[LOAD1]](s32)
   ; MIPS32:   RetRA implicit $v0
 entry:
-  %0 = load i32, i32* %p
+  %0 = load i32, ptr %p
   ret i32 %0
 }
 
-define i8* @ret_ptr(i8* %p) {
+define ptr @ret_ptr(ptr %p) {
   ; MIPS32-LABEL: name: ret_ptr
   ; MIPS32: bb.1.entry:
   ; MIPS32:   liveins: $a0
@@ -41,5 +41,5 @@ define i8* @ret_ptr(i8* %p) {
   ; MIPS32:   $v0 = COPY [[COPY]](p0)
   ; MIPS32:   RetRA implicit $v0
 entry:
-  ret i8* %p
+  ret ptr %p
 }

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll
index 116b76e4ee1e8..0ca54c331f1f9 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll
@@ -167,7 +167,7 @@ entry:
 }
 
 declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
-define void @uadd_with_overflow(i32 %lhs, i32 %rhs, i32* %padd, i1* %pcarry_flag) {
+define void @uadd_with_overflow(i32 %lhs, i32 %rhs, ptr %padd, ptr %pcarry_flag) {
 ; MIPS32-LABEL: uadd_with_overflow:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    addu $1, $4, $5
@@ -181,7 +181,7 @@ define void @uadd_with_overflow(i32 %lhs, i32 %rhs, i32* %padd, i1* %pcarry_flag
   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %lhs, i32 %rhs)
   %carry_flag = extractvalue { i32, i1 } %res, 1
   %add = extractvalue { i32, i1 } %res, 0
-  store i1 %carry_flag, i1* %pcarry_flag
-  store i32 %add, i32* %padd
+  store i1 %carry_flag, ptr %pcarry_flag
+  store i32 %add, ptr %padd
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec.ll
index 74ecbf6ed7a84..1cff6fcbf5089 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
-define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @add_v16i8(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: add_v16i8:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w1, 0($4)
@@ -11,14 +11,14 @@ define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %add = add <16 x i8> %1, %0
-  store <16 x i8> %add, <16 x i8>* %c, align 16
+  store <16 x i8> %add, ptr %c, align 16
   ret void
 }
 
-define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @add_v8i16(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: add_v8i16:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w1, 0($4)
@@ -28,14 +28,14 @@ define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %add = add <8 x i16> %1, %0
-  store <8 x i16> %add, <8 x i16>* %c, align 16
+  store <8 x i16> %add, ptr %c, align 16
   ret void
 }
 
-define void @add_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @add_v4i32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: add_v4i32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w1, 0($4)
@@ -45,14 +45,14 @@ define void @add_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %add = add <4 x i32> %1, %0
-  store <4 x i32> %add, <4 x i32>* %c, align 16
+  store <4 x i32> %add, ptr %c, align 16
   ret void
 }
 
-define void @add_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @add_v2i64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: add_v2i64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w1, 0($4)
@@ -62,9 +62,9 @@ define void @add_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %add = add <2 x i64> %1, %0
-  store <2 x i64> %add, <2 x i64>* %c, align 16
+  store <2 x i64> %add, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec_builtin.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec_builtin.ll
index ea05479ce2e8b..a3cb9cde7ff62 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec_builtin.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/add_vec_builtin.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
 declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>)
-define void @add_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @add_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: add_v16i8_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -12,15 +12,15 @@ define void @add_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %c, align 16
+  store <16 x i8> %2, ptr %c, align 16
   ret void
 }
 
 declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>)
-define void @add_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @add_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: add_v8i16_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -30,15 +30,15 @@ define void @add_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %c, align 16
+  store <8 x i16> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>)
-define void @add_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @add_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: add_v4i32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -48,15 +48,15 @@ define void @add_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %c, align 16
+  store <4 x i32> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>)
-define void @add_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @add_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: add_v2i64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -66,15 +66,15 @@ define void @add_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %c, align 16
+  store <2 x i64> %2, ptr %c, align 16
   ret void
 }
 
 declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32 immarg)
-define void @add_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) {
+define void @add_v16i8_builtin_imm(ptr %a, ptr %c) {
 ; P5600-LABEL: add_v16i8_builtin_imm:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -83,14 +83,14 @@ define void @add_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
   %1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 3)
-  store <16 x i8> %1, <16 x i8>* %c, align 16
+  store <16 x i8> %1, ptr %c, align 16
   ret void
 }
 
 declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32 immarg)
-define void @add_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) {
+define void @add_v8i16_builtin_imm(ptr %a, ptr %c) {
 ; P5600-LABEL: add_v8i16_builtin_imm:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -99,14 +99,14 @@ define void @add_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
   %1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 18)
-  store <8 x i16> %1, <8 x i16>* %c, align 16
+  store <8 x i16> %1, ptr %c, align 16
   ret void
 }
 
 declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32 immarg)
-define void @add_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) {
+define void @add_v4i32_builtin_imm(ptr %a, ptr %c) {
 ; P5600-LABEL: add_v4i32_builtin_imm:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -115,14 +115,14 @@ define void @add_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
   %1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 25)
-  store <4 x i32> %1, <4 x i32>* %c, align 16
+  store <4 x i32> %1, ptr %c, align 16
   ret void
 }
 
 declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32 immarg)
-define void @add_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) {
+define void @add_v2i64_builtin_imm(ptr %a, ptr %c) {
 ; P5600-LABEL: add_v2i64_builtin_imm:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -131,8 +131,8 @@ define void @add_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
   %1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 31)
-  store <2 x i64> %1, <2 x i64>* %c, align 16
+  store <2 x i64> %1, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/aggregate_struct_return.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/aggregate_struct_return.ll
index 32bc78827baf5..9d44410dcd062 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/aggregate_struct_return.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/aggregate_struct_return.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define { float, float } @add_complex_float({ float, float }* %a, { float, float }* %b) {
+define { float, float } @add_complex_float(ptr %a, ptr %b) {
 ; MIPS32-LABEL: add_complex_float:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lwc1 $f0, 0($4)
@@ -13,14 +13,14 @@ define { float, float } @add_complex_float({ float, float }* %a, { float, float
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %.realp = getelementptr inbounds { float, float }, { float, float }* %a, i32 0, i32 0
-  %.real = load float, float* %.realp, align 4
-  %.imagp = getelementptr inbounds { float, float }, { float, float }* %a, i32 0, i32 1
-  %.imag = load float, float* %.imagp, align 4
-  %.realp1 = getelementptr inbounds { float, float }, { float, float }* %b, i32 0, i32 0
-  %.real2 = load float, float* %.realp1, align 4
-  %.imagp3 = getelementptr inbounds { float, float }, { float, float }* %b, i32 0, i32 1
-  %.imag4 = load float, float* %.imagp3, align 4
+  %.realp = getelementptr inbounds { float, float }, ptr %a, i32 0, i32 0
+  %.real = load float, ptr %.realp, align 4
+  %.imagp = getelementptr inbounds { float, float }, ptr %a, i32 0, i32 1
+  %.imag = load float, ptr %.imagp, align 4
+  %.realp1 = getelementptr inbounds { float, float }, ptr %b, i32 0, i32 0
+  %.real2 = load float, ptr %.realp1, align 4
+  %.imagp3 = getelementptr inbounds { float, float }, ptr %b, i32 0, i32 1
+  %.imag4 = load float, ptr %.imagp3, align 4
   %add.r = fadd float %.real, %.real2
   %add.i = fadd float %.imag, %.imag4
   %.fca.0.insert = insertvalue { float, float } undef, float %add.r, 0
@@ -28,7 +28,7 @@ entry:
   ret { float, float } %.fca.1.insert
 }
 
-define { double, double } @add_complex_double({ double, double }* %a, { double, double }* %b) {
+define { double, double } @add_complex_double(ptr %a, ptr %b) {
 ; MIPS32-LABEL: add_complex_double:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ldc1 $f0, 0($4)
@@ -40,14 +40,14 @@ define { double, double } @add_complex_double({ double, double }* %a, { double,
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %.realp = getelementptr inbounds { double, double }, { double, double }* %a, i32 0, i32 0
-  %.real = load double, double* %.realp, align 8
-  %.imagp = getelementptr inbounds { double, double }, { double, double }* %a, i32 0, i32 1
-  %.imag = load double, double* %.imagp, align 8
-  %.realp1 = getelementptr inbounds { double, double }, { double, double }* %b, i32 0, i32 0
-  %.real2 = load double, double* %.realp1, align 8
-  %.imagp3 = getelementptr inbounds { double, double }, { double, double }* %b, i32 0, i32 1
-  %.imag4 = load double, double* %.imagp3, align 8
+  %.realp = getelementptr inbounds { double, double }, ptr %a, i32 0, i32 0
+  %.real = load double, ptr %.realp, align 8
+  %.imagp = getelementptr inbounds { double, double }, ptr %a, i32 0, i32 1
+  %.imag = load double, ptr %.imagp, align 8
+  %.realp1 = getelementptr inbounds { double, double }, ptr %b, i32 0, i32 0
+  %.real2 = load double, ptr %.realp1, align 8
+  %.imagp3 = getelementptr inbounds { double, double }, ptr %b, i32 0, i32 1
+  %.imag4 = load double, ptr %.imagp3, align 8
   %add.r = fadd double %.real, %.real2
   %add.i = fadd double %.imag, %.imag4
   %.fca.0.insert = insertvalue { double, double } undef, double %add.r, 0
@@ -56,7 +56,7 @@ entry:
 }
 
 declare { float, float } @ret_complex_float()
-define void @call_ret_complex_float({ float, float }* %z) {
+define void @call_ret_complex_float(ptr %z) {
 ; MIPS32-LABEL: call_ret_complex_float:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -24
@@ -77,15 +77,15 @@ entry:
   %call = call { float, float } @ret_complex_float()
   %0 = extractvalue { float, float } %call, 0
   %1 = extractvalue { float, float } %call, 1
-  %.realp = getelementptr inbounds { float, float }, { float, float }* %z, i32 0, i32 0
-  %.imagp = getelementptr inbounds { float, float }, { float, float }* %z, i32 0, i32 1
-  store float %0, float* %.realp, align 4
-  store float %1, float* %.imagp, align 4
+  %.realp = getelementptr inbounds { float, float }, ptr %z, i32 0, i32 0
+  %.imagp = getelementptr inbounds { float, float }, ptr %z, i32 0, i32 1
+  store float %0, ptr %.realp, align 4
+  store float %1, ptr %.imagp, align 4
   ret void
 }
 
 declare { double, double } @ret_complex_double()
-define void @call_ret_complex_double({ double, double }* %z) {
+define void @call_ret_complex_double(ptr %z) {
 ; MIPS32-LABEL: call_ret_complex_double:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -24
@@ -106,9 +106,9 @@ entry:
   %call = call { double, double } @ret_complex_double()
   %0 = extractvalue { double, double } %call, 0
   %1 = extractvalue { double, double } %call, 1
-  %.realp = getelementptr inbounds { double, double }, { double, double }* %z, i32 0, i32 0
-  %.imagp = getelementptr inbounds { double, double }, { double, double }* %z, i32 0, i32 1
-  store double %0, double* %.realp, align 8
-  store double %1, double* %.imagp, align 8
+  %.realp = getelementptr inbounds { double, double }, ptr %z, i32 0, i32 0
+  %.imagp = getelementptr inbounds { double, double }, ptr %z, i32 0, i32 1
+  store double %0, ptr %.realp, align 8
+  store double %1, ptr %.imagp, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/brindirect.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/brindirect.ll
index 9bb803f4cfd3c..e4f460c7ecad2 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/brindirect.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/brindirect.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define i32 @indirectbr(i8 *%addr) {
+define i32 @indirectbr(ptr %addr) {
 ; MIPS32-LABEL: indirectbr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -8
@@ -23,7 +23,7 @@ define i32 @indirectbr(i8 *%addr) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  indirectbr i8* %addr, [label %L1, label %L2]
+  indirectbr ptr %addr, [label %L1, label %L2]
 
 L1:
   ret i32 0

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/call.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/call.ll
index 0312f49fa6ee7..ee8ca2d9a9bdc 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/call.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/call.ll
@@ -106,7 +106,7 @@ entry:
   ret i32 %doublez
 }
 
-define i32 @call_reg(i32 (i32, i32)* %f_ptr, i32 %x, i32 %y) {
+define i32 @call_reg(ptr %f_ptr, i32 %x, i32 %y) {
 ; MIPS32-LABEL: call_reg:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -24
@@ -143,9 +143,9 @@ entry:
   ret i32 %call
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1 immarg)
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1 immarg)
 
-define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 signext %length) {
+define void @call_symbol(ptr nocapture readonly %src, ptr nocapture %dest, i32 signext %length) {
 ; MIPS32-LABEL: call_symbol:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -24
@@ -178,6 +178,6 @@ define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 s
 ; MIPS32_PIC-NEXT:    jr $ra
 ; MIPS32_PIC-NEXT:    nop
 entry:
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 %length, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 %length, i1 false)
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/dyn_stackalloc.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/dyn_stackalloc.ll
index 294bc71443ea5..201706053657a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/dyn_stackalloc.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/dyn_stackalloc.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-declare i32 @puts(i8*)
-declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1)
+declare i32 @puts(ptr)
+declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)
 
 define void @Print_c_N_times(i8 %c, i32 %N) {
 ; MIPS32-LABEL: Print_c_N_times:
@@ -53,9 +53,9 @@ define void @Print_c_N_times(i8 %c, i32 %N) {
 entry:
   %add = add i32 %N, 1
   %vla = alloca i8, i32 %add, align 1
-  call void @llvm.memset.p0i8.i32(i8* align 1 %vla, i8 %c, i32 %N, i1 false)
-  %arrayidx = getelementptr inbounds i8, i8* %vla, i32 %N
-  store i8 0, i8* %arrayidx, align 1
-  %call = call i32 @puts(i8* %vla)
+  call void @llvm.memset.p0.i32(ptr align 1 %vla, i8 %c, i32 %N, i1 false)
+  %arrayidx = getelementptr inbounds i8, ptr %vla, i32 %N
+  store i8 0, ptr %arrayidx, align 1
+  %call = call i32 @puts(ptr %vla)
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec.ll
index 0413bf231d4ac..5a2715ce5dd09 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
 declare <4 x float>  @llvm.fabs.v4f32(<4 x float>  %Val)
-define void @fabs_v4f32(<4 x float>* %a, <4 x float>* %c) {
+define void @fabs_v4f32(ptr %a, ptr %c) {
 ; P5600-LABEL: fabs_v4f32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -11,14 +11,14 @@ define void @fabs_v4f32(<4 x float>* %a, <4 x float>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
+  %0 = load <4 x float>, ptr %a, align 16
   %fabs = call <4 x float> @llvm.fabs.v4f32 (<4 x float> %0)
-  store <4 x float> %fabs, <4 x float>* %c, align 16
+  store <4 x float> %fabs, ptr %c, align 16
   ret void
 }
 
 declare <2 x double> @llvm.fabs.v2f64(<2 x double> %Val)
-define void @fabs_v2f64(<2 x double>* %a, <2 x double>* %c) {
+define void @fabs_v2f64(ptr %a, ptr %c) {
 ; P5600-LABEL: fabs_v2f64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -27,8 +27,8 @@ define void @fabs_v2f64(<2 x double>* %a, <2 x double>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
+  %0 = load <2 x double>, ptr %a, align 16
   %fabs = call <2 x double> @llvm.fabs.v2f64 (<2 x double> %0)
-  store <2 x double> %fabs, <2 x double>* %c, align 16
+  store <2 x double> %fabs, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec_builtin.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec_builtin.ll
index b29d2f9a5682a..81f837f603697 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec_builtin.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fabs_vec_builtin.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
 declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>)
-define void @fabs_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) {
+define void @fabs_v4f32_builtin(ptr %a, ptr %c) {
 ; P5600-LABEL: fabs_v4f32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -11,15 +11,15 @@ define void @fabs_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
+  %0 = load <4 x float>, ptr %a, align 16
   %1 = tail call <4 x float> @llvm.mips.fmax.a.w(<4 x float> %0, <4 x float> %0)
-  store <4 x float> %1, <4 x float>* %c, align 16
+  store <4 x float> %1, ptr %c, align 16
   ret void
 }
 
 
 declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>)
-define void @fabs_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) {
+define void @fabs_v2f64_builtin(ptr %a, ptr %c) {
 ; P5600-LABEL: fabs_v2f64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -28,8 +28,8 @@ define void @fabs_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
+  %0 = load <2 x double>, ptr %a, align 16
   %1 = tail call <2 x double> @llvm.mips.fmax.a.d(<2 x double> %0, <2 x double> %0)
-  store <2 x double> %1, <2 x double>* %c, align 16
+  store <2 x double> %1, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fence.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fence.ll
index ddf55a73534d6..45a2846ea5378 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fence.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fence.ll
@@ -1,13 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define i32 @atomic_load_i32(i32* %ptr) {
+define i32 @atomic_load_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lw $2, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %val = load atomic i32, i32* %ptr acquire, align 4
+  %val = load atomic i32, ptr %ptr acquire, align 4
   ret i32 %val
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations.ll
index 6a7486fca5b4b..fa225de7ca652 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
-define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fadd_v4f32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fadd_v4f32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -11,15 +11,15 @@ define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %add = fadd <4 x float> %0, %1
-  store <4 x float> %add, <4 x float>* %c, align 16
+  store <4 x float> %add, ptr %c, align 16
   ret void
 }
 
 
-define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fadd_v2f64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fadd_v2f64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -29,15 +29,15 @@ define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
-  %1 = load <2 x double>, <2 x double>* %b, align 16
+  %0 = load <2 x double>, ptr %a, align 16
+  %1 = load <2 x double>, ptr %b, align 16
   %add = fadd <2 x double> %0, %1
-  store <2 x double> %add, <2 x double>* %c, align 16
+  store <2 x double> %add, ptr %c, align 16
   ret void
 }
 
 
-define void @fsub_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fsub_v4f32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fsub_v4f32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -47,15 +47,15 @@ define void @fsub_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %sub = fsub <4 x float> %0, %1
-  store <4 x float> %sub, <4 x float>* %c, align 16
+  store <4 x float> %sub, ptr %c, align 16
   ret void
 }
 
 
-define void @fsub_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fsub_v2f64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fsub_v2f64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -65,15 +65,15 @@ define void @fsub_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
-  %1 = load <2 x double>, <2 x double>* %b, align 16
+  %0 = load <2 x double>, ptr %a, align 16
+  %1 = load <2 x double>, ptr %b, align 16
   %sub = fsub <2 x double> %0, %1
-  store <2 x double> %sub, <2 x double>* %c, align 16
+  store <2 x double> %sub, ptr %c, align 16
   ret void
 }
 
 
-define void @fmul_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fmul_v4f32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fmul_v4f32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -83,15 +83,15 @@ define void @fmul_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %mul = fmul <4 x float> %0, %1
-  store <4 x float> %mul, <4 x float>* %c, align 16
+  store <4 x float> %mul, ptr %c, align 16
   ret void
 }
 
 
-define void @fmul_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fmul_v2f64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fmul_v2f64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -101,15 +101,15 @@ define void @fmul_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
-  %1 = load <2 x double>, <2 x double>* %b, align 16
+  %0 = load <2 x double>, ptr %a, align 16
+  %1 = load <2 x double>, ptr %b, align 16
   %mul = fmul <2 x double> %0, %1
-  store <2 x double> %mul, <2 x double>* %c, align 16
+  store <2 x double> %mul, ptr %c, align 16
   ret void
 }
 
 
-define void @fdiv_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fdiv_v4f32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fdiv_v4f32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -119,15 +119,15 @@ define void @fdiv_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %div = fdiv <4 x float> %0, %1
-  store <4 x float> %div, <4 x float>* %c, align 16
+  store <4 x float> %div, ptr %c, align 16
   ret void
 }
 
 
-define void @fdiv_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fdiv_v2f64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fdiv_v2f64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -137,9 +137,9 @@ define void @fdiv_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
-  %1 = load <2 x double>, <2 x double>* %b, align 16
+  %0 = load <2 x double>, ptr %a, align 16
+  %1 = load <2 x double>, ptr %b, align 16
   %div = fdiv <2 x double> %0, %1
-  store <2 x double> %div, <2 x double>* %c, align 16
+  store <2 x double> %div, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations_builtin.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations_builtin.ll
index ad7a710be1487..3120066e74733 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations_builtin.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/floating_point_vec_arithmetic_operations_builtin.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
 declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>)
-define void @fadd_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fadd_v4f32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fadd_v4f32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -12,15 +12,15 @@ define void @fadd_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* %c, align 16
+  store <4 x float> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>)
-define void @fadd_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fadd_v2f64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fadd_v2f64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -30,15 +30,15 @@ define void @fadd_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
-  %1 = load <2 x double>, <2 x double>* %b, align 16
+  %0 = load <2 x double>, ptr %a, align 16
+  %1 = load <2 x double>, ptr %b, align 16
   %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* %c, align 16
+  store <2 x double> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>)
-define void @fsub_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fsub_v4f32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fsub_v4f32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -48,15 +48,15 @@ define void @fsub_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %2 = tail call <4 x float> @llvm.mips.fsub.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* %c, align 16
+  store <4 x float> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>)
-define void @fsub_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fsub_v2f64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fsub_v2f64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -66,15 +66,15 @@ define void @fsub_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
-  %1 = load <2 x double>, <2 x double>* %b, align 16
+  %0 = load <2 x double>, ptr %a, align 16
+  %1 = load <2 x double>, ptr %b, align 16
   %2 = tail call <2 x double> @llvm.mips.fsub.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* %c, align 16
+  store <2 x double> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>)
-define void @fmul_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fmul_v4f32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fmul_v4f32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -84,15 +84,15 @@ define void @fmul_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %2 = tail call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* %c, align 16
+  store <4 x float> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>)
-define void @fmul_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fmul_v2f64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fmul_v2f64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -102,15 +102,15 @@ define void @fmul_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
-  %1 = load <2 x double>, <2 x double>* %b, align 16
+  %0 = load <2 x double>, ptr %a, align 16
+  %1 = load <2 x double>, ptr %b, align 16
   %2 = tail call <2 x double> @llvm.mips.fmul.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* %c, align 16
+  store <2 x double> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>)
-define void @fdiv_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) {
+define void @fdiv_v4f32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fdiv_v4f32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -120,15 +120,15 @@ define void @fdiv_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %2 = tail call <4 x float> @llvm.mips.fdiv.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* %c, align 16
+  store <4 x float> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>)
-define void @fdiv_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) {
+define void @fdiv_v2f64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: fdiv_v2f64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -138,9 +138,9 @@ define void @fdiv_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
-  %1 = load <2 x double>, <2 x double>* %b, align 16
+  %0 = load <2 x double>, ptr %a, align 16
+  %1 = load <2 x double>, ptr %b, align 16
   %2 = tail call <2 x double> @llvm.mips.fdiv.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* %c, align 16
+  store <2 x double> %2, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec.ll
index 27bbb9aa59c86..d43fda07757d1 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
 declare <4 x float>  @llvm.sqrt.v4f32(<4 x float>  %Val)
-define void @sqrt_v4f32(<4 x float>* %a, <4 x float>* %c) {
+define void @sqrt_v4f32(ptr %a, ptr %c) {
 ; P5600-LABEL: sqrt_v4f32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -11,14 +11,14 @@ define void @sqrt_v4f32(<4 x float>* %a, <4 x float>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
+  %0 = load <4 x float>, ptr %a, align 16
   %sqrt = call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %0)
-  store <4 x float> %sqrt, <4 x float>* %c, align 16
+  store <4 x float> %sqrt, ptr %c, align 16
   ret void
 }
 
 declare <2 x double> @llvm.sqrt.v2f64(<2 x double> %Val)
-define void @sqrt_v2f64(<2 x double>* %a, <2 x double>* %c) {
+define void @sqrt_v2f64(ptr %a, ptr %c) {
 ; P5600-LABEL: sqrt_v2f64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -27,8 +27,8 @@ define void @sqrt_v2f64(<2 x double>* %a, <2 x double>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
+  %0 = load <2 x double>, ptr %a, align 16
   %sqrt = call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %0)
-  store <2 x double> %sqrt, <2 x double>* %c, align 16
+  store <2 x double> %sqrt, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec_builtin.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec_builtin.ll
index a765591d42f35..68a5fa267b209 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec_builtin.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/fsqrt_vec_builtin.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
 declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>)
-define void @fsqrt_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) {
+define void @fsqrt_v4f32_builtin(ptr %a, ptr %c) {
 ; P5600-LABEL: fsqrt_v4f32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -11,14 +11,14 @@ define void @fsqrt_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
+  %0 = load <4 x float>, ptr %a, align 16
   %1 = tail call <4 x float> @llvm.mips.fsqrt.w(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* %c, align 16
+  store <4 x float> %1, ptr %c, align 16
   ret void
 }
 
 declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>)
-define void @fsqrt_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) {
+define void @fsqrt_v2f64_builtin(ptr %a, ptr %c) {
 ; P5600-LABEL: fsqrt_v2f64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -27,9 +27,9 @@ define void @fsqrt_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %a, align 16
+  %0 = load <2 x double>, ptr %a, align 16
   %1 = tail call <2 x double> @llvm.mips.fsqrt.d(<2 x double> %0)
-  store <2 x double> %1, <2 x double>* %c, align 16
+  store <2 x double> %1, ptr %c, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address.ll
index a23ab7c3ca8f7..10c3dc31610d3 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address.ll
@@ -24,9 +24,9 @@ define i32 @main() {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i32 signext 1234567890)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 signext 1234567890)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address_pic.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address_pic.ll
index 8e8ca91eb9de4..19fe8b521b89b 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address_pic.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/global_address_pic.ll
@@ -74,7 +74,7 @@ define i32 @ret_global_int() {
 ; MIPS32_PIC-NEXT:    jr $ra
 ; MIPS32_PIC-NEXT:    nop
 entry:
-  %0 = load i32, i32* @val
+  %0 = load i32, ptr @val
   ret i32 %0
 }
 
@@ -90,6 +90,6 @@ define i32 @ret_global_int_with_local_linkage() {
 ; MIPS32_PIC-NEXT:    jr $ra
 ; MIPS32_PIC-NEXT:    nop
 entry:
-  %0 = load i32, i32* @val_with_local_linkage
+  %0 = load i32, ptr @val_with_local_linkage
   ret i32 %0
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/icmp.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/icmp.ll
index a7e0d05544be8..ab474e1f8e722 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/icmp.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/icmp.ll
@@ -117,7 +117,7 @@ entry:
   ret i1 %cmp
 }
 
-define i1 @eq_ptr(i32* %a, i32* %b){
+define i1 @eq_ptr(ptr %a, ptr %b){
 ; MIPS32-LABEL: eq_ptr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    xor $1, $4, $5
@@ -125,7 +125,7 @@ define i1 @eq_ptr(i32* %a, i32* %b){
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %cmp = icmp eq i32* %a, %b
+  %cmp = icmp eq ptr %a, %b
   ret i1 %cmp
 }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/inttoptr_and_ptrtoint.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/inttoptr_and_ptrtoint.ll
index c27b5a939a4ae..3b97d7e68fa52 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/inttoptr_and_ptrtoint.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/inttoptr_and_ptrtoint.ll
@@ -1,24 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define i32* @inttoptr(i32 %a) {
+define ptr @inttoptr(i32 %a) {
 ; MIPS32-LABEL: inttoptr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    move $2, $4
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = inttoptr i32 %a to i32*
-  ret i32* %0
+  %0 = inttoptr i32 %a to ptr
+  ret ptr %0
 }
 
-define i32 @ptrtoint(i32* %a) {
+define i32 @ptrtoint(ptr %a) {
 ; MIPS32-LABEL: ptrtoint:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    move $2, $4
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = ptrtoint i32* %a to i32
+  %0 = ptrtoint ptr %a to i32
   ret i32 %0
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll
index bf163ecaef5d6..82ed06ec3ff1b 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll
@@ -1,18 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define i32 @load_i32(i32* %ptr) {
+define i32 @load_i32(ptr %ptr) {
 ; MIPS32-LABEL: load_i32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %ptr
+  %0 = load i32, ptr %ptr
   ret i32 %0
 }
 
-define i64 @load_i64(i64* %ptr) {
+define i64 @load_i64(ptr %ptr) {
 ; MIPS32-LABEL: load_i64:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -20,11 +20,11 @@ define i64 @load_i64(i64* %ptr) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i64, i64* %ptr
+  %0 = load i64, ptr %ptr
   ret i64 %0
 }
 
-define void @load_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b) {
+define void @load_ambiguous_i64_in_fpr(ptr %i64_ptr_a, ptr %i64_ptr_b) {
 ; MIPS32-LABEL: load_ambiguous_i64_in_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ldc1 $f0, 0($4)
@@ -32,23 +32,23 @@ define void @load_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i64, i64* %i64_ptr_a
-  store i64 %0, i64* %i64_ptr_b
+  %0 = load i64, ptr %i64_ptr_a
+  store i64 %0, ptr %i64_ptr_b
   ret void
 }
 
-define float @load_float(float* %ptr) {
+define float @load_float(ptr %ptr) {
 ; MIPS32-LABEL: load_float:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lwc1 $f0, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load float, float* %ptr
+  %0 = load float, ptr %ptr
   ret float %0
 }
 
-define void @load_ambiguous_float_in_gpr(float* %float_ptr_a, float* %float_ptr_b) {
+define void @load_ambiguous_float_in_gpr(ptr %float_ptr_a, ptr %float_ptr_b) {
 ; MIPS32-LABEL: load_ambiguous_float_in_gpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $1, 0($4)
@@ -56,18 +56,18 @@ define void @load_ambiguous_float_in_gpr(float* %float_ptr_a, float* %float_ptr_
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load float, float* %float_ptr_a
-  store float %0, float* %float_ptr_b
+  %0 = load float, ptr %float_ptr_a
+  store float %0, ptr %float_ptr_b
   ret void
 }
 
-define double @load_double(double* %ptr) {
+define double @load_double(ptr %ptr) {
 ; MIPS32-LABEL: load_double:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ldc1 $f0, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load double, double* %ptr
+  %0 = load double, ptr %ptr
   ret double %0
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_4_unaligned.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_4_unaligned.ll
index 90043c0e9a122..037964d0f7699 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_4_unaligned.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_4_unaligned.ll
@@ -30,7 +30,7 @@ define float @load_float_align1() {
 ; MIPS32R6-NEXT:    lwc1 $f0, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load float, float* @float_align1, align 1
+  %0 = load float, ptr @float_align1, align 1
   ret float %0
 }
 
@@ -53,7 +53,7 @@ define float @load_float_align2() {
 ; MIPS32R6-NEXT:    lwc1 $f0, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load float, float* @float_align2, align 2
+  %0 = load float, ptr @float_align2, align 2
   ret float %0
 }
 
@@ -73,7 +73,7 @@ define float @load_float_align4() {
 ; MIPS32R6-NEXT:    lwc1 $f0, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load float, float* @float_align4, align 4
+  %0 = load float, ptr @float_align4, align 4
   ret float %0
 }
 
@@ -93,7 +93,7 @@ define float @load_float_align8() {
 ; MIPS32R6-NEXT:    lwc1 $f0, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load float, float* @float_align8, align 8
+  %0 = load float, ptr @float_align8, align 8
   ret float %0
 }
 
@@ -115,7 +115,7 @@ define i32 @load_i32_align1() {
 ; MIPS32R6-NEXT:    lw $2, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load i32, i32* @i32_align1, align 1
+  %0 = load i32, ptr @i32_align1, align 1
   ret i32 %0
 }
 
@@ -137,7 +137,7 @@ define i32 @load_i32_align2() {
 ; MIPS32R6-NEXT:    lw $2, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load i32, i32* @i32_align2, align 2
+  %0 = load i32, ptr @i32_align2, align 2
   ret i32 %0
 }
 
@@ -157,7 +157,7 @@ define i32 @load_i32_align4() {
 ; MIPS32R6-NEXT:    lw $2, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load i32, i32* @i32_align4, align 4
+  %0 = load i32, ptr @i32_align4, align 4
   ret i32 %0
 }
 
@@ -177,6 +177,6 @@ define i32 @load_i32_align8() {
 ; MIPS32R6-NEXT:    lw $2, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load i32, i32* @i32_align8, align 8
+  %0 = load i32, ptr @i32_align8, align 8
   ret i32 %0
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_atomic.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_atomic.ll
index e4f403c45ca2d..850e95bf49e1e 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_atomic.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_atomic.ll
@@ -5,41 +5,41 @@
 ; unordered
 ; --------------------------------------------------------------------
 
-define i8 @atomic_load_unordered_i8(i8* %ptr) {
+define i8 @atomic_load_unordered_i8(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i8:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i8, i8* %ptr unordered, align 1
+  %load = load atomic i8, ptr %ptr unordered, align 1
   ret i8 %load
 }
 
-define i32 @atomic_load_unordered_i8_sext_i32(i8* %ptr) {
+define i32 @atomic_load_unordered_i8_sext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i8_sext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 24
-  %load = load atomic i8, i8* %ptr unordered, align 1
+  %load = load atomic i8, ptr %ptr unordered, align 1
   %sext = sext i8 %load to i32
   ret i32 %sext
 }
 
-define i16 @atomic_load_unordered_i8_sext_i16(i8* %ptr) {
+define i16 @atomic_load_unordered_i8_sext_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i8_sext_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 24
-  %load = load atomic i8, i8* %ptr unordered, align 1
+  %load = load atomic i8, ptr %ptr unordered, align 1
   %sext = sext i8 %load to i16
   ret i16 %sext
 }
 
-define i64 @atomic_load_unordered_i8_sext_i64(i8* %ptr) {
+define i64 @atomic_load_unordered_i8_sext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i8_sext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -47,68 +47,68 @@ define i64 @atomic_load_unordered_i8_sext_i64(i8* %ptr) {
 ; MIPS32-NEXT:    sra $2, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $3, $1, 31
-  %load = load atomic i8, i8* %ptr unordered, align 1
+  %load = load atomic i8, ptr %ptr unordered, align 1
   %sext = sext i8 %load to i64
   ret i64 %sext
 }
 
-define i32 @atomic_load_unordered_i8_zext_i32(i8* %ptr) {
+define i32 @atomic_load_unordered_i8_zext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i8_zext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 255
-  %load = load atomic i8, i8* %ptr unordered, align 1
+  %load = load atomic i8, ptr %ptr unordered, align 1
   %zext = zext i8 %load to i32
   ret i32 %zext
 }
 
-define i16 @atomic_load_unordered_i8_zext_i16(i8* %ptr) {
+define i16 @atomic_load_unordered_i8_zext_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i8_zext_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 255
-  %load = load atomic i8, i8* %ptr unordered, align 1
+  %load = load atomic i8, ptr %ptr unordered, align 1
   %zext = zext i8 %load to i16
   ret i16 %zext
 }
 
-define i64 @atomic_load_unordered_i8_zext_i64(i8* %ptr) {
+define i64 @atomic_load_unordered_i8_zext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i8_zext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    andi $2, $1, 255
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $3, $1, 0
-  %load = load atomic i8, i8* %ptr unordered, align 1
+  %load = load atomic i8, ptr %ptr unordered, align 1
   %zext = zext i8 %load to i64
   ret i64 %zext
 }
 
-define i16 @atomic_load_unordered_i16(i16* %ptr) {
+define i16 @atomic_load_unordered_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i16, i16* %ptr unordered, align 2
+  %load = load atomic i16, ptr %ptr unordered, align 2
   ret i16 %load
 }
 
-define i32 @atomic_load_unordered_i16_sext_i32(i16* %ptr) {
+define i32 @atomic_load_unordered_i16_sext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i16_sext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
 ; MIPS32-NEXT:    sll $1, $1, 16
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 16
-  %load = load atomic i16, i16* %ptr unordered, align 2
+  %load = load atomic i16, ptr %ptr unordered, align 2
   %sext = sext i16 %load to i32
   ret i32 %sext
 }
 
-define i64 @atomic_load_unordered_i16_sext_i64(i16* %ptr) {
+define i64 @atomic_load_unordered_i16_sext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i16_sext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
@@ -116,127 +116,127 @@ define i64 @atomic_load_unordered_i16_sext_i64(i16* %ptr) {
 ; MIPS32-NEXT:    sra $2, $1, 16
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $3, $1, 31
-  %load = load atomic i16, i16* %ptr unordered, align 2
+  %load = load atomic i16, ptr %ptr unordered, align 2
   %sext = sext i16 %load to i64
   ret i64 %sext
 }
 
-define i32 @atomic_load_unordered_i16_zext_i32(i16* %ptr) {
+define i32 @atomic_load_unordered_i16_zext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i16_zext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 65535
-  %load = load atomic i16, i16* %ptr unordered, align 2
+  %load = load atomic i16, ptr %ptr unordered, align 2
   %zext = zext i16 %load to i32
   ret i32 %zext
 }
 
-define i64 @atomic_load_unordered_i16_zext_i64(i16* %ptr) {
+define i64 @atomic_load_unordered_i16_zext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i16_zext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
 ; MIPS32-NEXT:    andi $2, $1, 65535
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $3, $1, 0
-  %load = load atomic i16, i16* %ptr unordered, align 2
+  %load = load atomic i16, ptr %ptr unordered, align 2
   %zext = zext i16 %load to i64
   ret i64 %zext
 }
 
-define i32 @atomic_load_unordered_i32(i32* %ptr) {
+define i32 @atomic_load_unordered_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lw $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i32, i32* %ptr unordered, align 4
+  %load = load atomic i32, ptr %ptr unordered, align 4
   ret i32 %load
 }
 
-define i64 @atomic_load_unordered_i64(i64* %ptr) {
+define i64 @atomic_load_unordered_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    ldc1 $f0, 0($4)
 ; MIPS32-NEXT:    mfc1 $2, $f0
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    mfc1 $3, $f1
-  %load = load atomic i64, i64* %ptr unordered, align 8
+  %load = load atomic i64, ptr %ptr unordered, align 8
   ret i64 %load
 }
 
-define float @atomic_load_unordered_f32(float* %ptr) {
+define float @atomic_load_unordered_f32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_f32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lwc1 $f0, 64($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds float, float* %ptr, i32 16
-  %load = load atomic float, float* %gep unordered, align 4
+  %gep = getelementptr inbounds float, ptr %ptr, i32 16
+  %load = load atomic float, ptr %gep unordered, align 4
   ret float %load
 }
 
-define double @atomic_load_unordered_f64(double* %ptr) {
+define double @atomic_load_unordered_f64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_f64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    ldc1 $f0, 128($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds double, double* %ptr, i32 16
-  %load = load atomic double, double* %gep unordered, align 8
+  %gep = getelementptr inbounds double, ptr %ptr, i32 16
+  %load = load atomic double, ptr %gep unordered, align 8
   ret double %load
 }
 
-define i8* @atomic_load_unordered_p0i8(i8** %ptr) {
+define ptr @atomic_load_unordered_p0i8(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_unordered_p0i8:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lw $2, 64($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds i8*, i8** %ptr, i32 16
-  %load = load atomic i8*, i8** %gep unordered, align 4
-  ret i8* %load
+  %gep = getelementptr inbounds ptr, ptr %ptr, i32 16
+  %load = load atomic ptr, ptr %gep unordered, align 4
+  ret ptr %load
 }
 
 ; --------------------------------------------------------------------
 ; monotonic
 ; --------------------------------------------------------------------
 
-define i8 @atomic_load_monotonic_i8(i8* %ptr) {
+define i8 @atomic_load_monotonic_i8(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i8:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i8, i8* %ptr monotonic, align 1
+  %load = load atomic i8, ptr %ptr monotonic, align 1
   ret i8 %load
 }
 
-define i32 @atomic_load_monotonic_i8_sext_i32(i8* %ptr) {
+define i32 @atomic_load_monotonic_i8_sext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i8_sext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 24
-  %load = load atomic i8, i8* %ptr monotonic, align 1
+  %load = load atomic i8, ptr %ptr monotonic, align 1
   %sext = sext i8 %load to i32
   ret i32 %sext
 }
 
-define i16 @atomic_load_monotonic_i8_sext_i16(i8* %ptr) {
+define i16 @atomic_load_monotonic_i8_sext_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i8_sext_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 24
-  %load = load atomic i8, i8* %ptr monotonic, align 1
+  %load = load atomic i8, ptr %ptr monotonic, align 1
   %sext = sext i8 %load to i16
   ret i16 %sext
 }
 
-define i64 @atomic_load_monotonic_i8_sext_i64(i8* %ptr) {
+define i64 @atomic_load_monotonic_i8_sext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i8_sext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -244,68 +244,68 @@ define i64 @atomic_load_monotonic_i8_sext_i64(i8* %ptr) {
 ; MIPS32-NEXT:    sra $2, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $3, $1, 31
-  %load = load atomic i8, i8* %ptr monotonic, align 1
+  %load = load atomic i8, ptr %ptr monotonic, align 1
   %sext = sext i8 %load to i64
   ret i64 %sext
 }
 
-define i32 @atomic_load_monotonic_i8_zext_i32(i8* %ptr) {
+define i32 @atomic_load_monotonic_i8_zext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i8_zext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 255
-  %load = load atomic i8, i8* %ptr monotonic, align 1
+  %load = load atomic i8, ptr %ptr monotonic, align 1
   %zext = zext i8 %load to i32
   ret i32 %zext
 }
 
-define i16 @atomic_load_monotonic_i8_zext_i16(i8* %ptr) {
+define i16 @atomic_load_monotonic_i8_zext_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i8_zext_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 255
-  %load = load atomic i8, i8* %ptr monotonic, align 1
+  %load = load atomic i8, ptr %ptr monotonic, align 1
   %zext = zext i8 %load to i16
   ret i16 %zext
 }
 
-define i64 @atomic_load_monotonic_i8_zext_i64(i8* %ptr) {
+define i64 @atomic_load_monotonic_i8_zext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i8_zext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    andi $2, $1, 255
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $3, $1, 0
-  %load = load atomic i8, i8* %ptr monotonic, align 1
+  %load = load atomic i8, ptr %ptr monotonic, align 1
   %zext = zext i8 %load to i64
   ret i64 %zext
 }
 
-define i16 @atomic_load_monotonic_i16(i16* %ptr) {
+define i16 @atomic_load_monotonic_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i16, i16* %ptr monotonic, align 2
+  %load = load atomic i16, ptr %ptr monotonic, align 2
   ret i16 %load
 }
 
-define i32 @atomic_load_monotonic_i16_sext_i32(i16* %ptr) {
+define i32 @atomic_load_monotonic_i16_sext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i16_sext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
 ; MIPS32-NEXT:    sll $1, $1, 16
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 16
-  %load = load atomic i16, i16* %ptr monotonic, align 2
+  %load = load atomic i16, ptr %ptr monotonic, align 2
   %sext = sext i16 %load to i32
   ret i32 %sext
 }
 
-define i64 @atomic_load_monotonic_i16_sext_i64(i16* %ptr) {
+define i64 @atomic_load_monotonic_i16_sext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i16_sext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
@@ -313,104 +313,104 @@ define i64 @atomic_load_monotonic_i16_sext_i64(i16* %ptr) {
 ; MIPS32-NEXT:    sra $2, $1, 16
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $3, $1, 31
-  %load = load atomic i16, i16* %ptr monotonic, align 2
+  %load = load atomic i16, ptr %ptr monotonic, align 2
   %sext = sext i16 %load to i64
   ret i64 %sext
 }
 
-define i32 @atomic_load_monotonic_i16_zext_i32(i16* %ptr) {
+define i32 @atomic_load_monotonic_i16_zext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i16_zext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 65535
-  %load = load atomic i16, i16* %ptr monotonic, align 2
+  %load = load atomic i16, ptr %ptr monotonic, align 2
   %zext = zext i16 %load to i32
   ret i32 %zext
 }
 
-define i64 @atomic_load_monotonic_i16_zext_i64(i16* %ptr) {
+define i64 @atomic_load_monotonic_i16_zext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i16_zext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
 ; MIPS32-NEXT:    andi $2, $1, 65535
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $3, $1, 0
-  %load = load atomic i16, i16* %ptr monotonic, align 2
+  %load = load atomic i16, ptr %ptr monotonic, align 2
   %zext = zext i16 %load to i64
   ret i64 %zext
 }
 
-define i32 @atomic_load_monotonic_i32(i32* %ptr) {
+define i32 @atomic_load_monotonic_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lw $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i32, i32* %ptr monotonic, align 4
+  %load = load atomic i32, ptr %ptr monotonic, align 4
   ret i32 %load
 }
 
-define i64 @atomic_load_monotonic_i64(i64* %ptr) {
+define i64 @atomic_load_monotonic_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    ldc1 $f0, 0($4)
 ; MIPS32-NEXT:    mfc1 $2, $f0
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    mfc1 $3, $f1
-  %load = load atomic i64, i64* %ptr monotonic, align 8
+  %load = load atomic i64, ptr %ptr monotonic, align 8
   ret i64 %load
 }
 
-define float @atomic_load_monotonic_f32(float* %ptr) {
+define float @atomic_load_monotonic_f32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_f32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lwc1 $f0, 64($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds float, float* %ptr, i32 16
-  %load = load atomic float, float* %gep monotonic, align 4
+  %gep = getelementptr inbounds float, ptr %ptr, i32 16
+  %load = load atomic float, ptr %gep monotonic, align 4
   ret float %load
 }
 
-define double @atomic_load_monotonic_f64(double* %ptr) {
+define double @atomic_load_monotonic_f64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_f64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    ldc1 $f0, 128($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds double, double* %ptr, i32 16
-  %load = load atomic double, double* %gep monotonic, align 8
+  %gep = getelementptr inbounds double, ptr %ptr, i32 16
+  %load = load atomic double, ptr %gep monotonic, align 8
   ret double %load
 }
 
-define i8* @atomic_load_monotonic_p0i8(i8** %ptr) {
+define ptr @atomic_load_monotonic_p0i8(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_monotonic_p0i8:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lw $2, 64($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds i8*, i8** %ptr, i32 16
-  %load = load atomic i8*, i8** %gep monotonic, align 4
-  ret i8* %load
+  %gep = getelementptr inbounds ptr, ptr %ptr, i32 16
+  %load = load atomic ptr, ptr %gep monotonic, align 4
+  ret ptr %load
 }
 
 ; --------------------------------------------------------------------
 ; acquire
 ; --------------------------------------------------------------------
 
-define i8 @atomic_load_acquire_i8(i8* %ptr) {
+define i8 @atomic_load_acquire_i8(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i8:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $2, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i8, i8* %ptr acquire, align 1
+  %load = load atomic i8, ptr %ptr acquire, align 1
   ret i8 %load
 }
 
-define i32 @atomic_load_acquire_i8_sext_i32(i8* %ptr) {
+define i32 @atomic_load_acquire_i8_sext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i8_sext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -418,12 +418,12 @@ define i32 @atomic_load_acquire_i8_sext_i32(i8* %ptr) {
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 24
-  %load = load atomic i8, i8* %ptr acquire, align 1
+  %load = load atomic i8, ptr %ptr acquire, align 1
   %sext = sext i8 %load to i32
   ret i32 %sext
 }
 
-define i16 @atomic_load_acquire_i8_sext_i16(i8* %ptr) {
+define i16 @atomic_load_acquire_i8_sext_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i8_sext_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -431,12 +431,12 @@ define i16 @atomic_load_acquire_i8_sext_i16(i8* %ptr) {
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 24
-  %load = load atomic i8, i8* %ptr acquire, align 1
+  %load = load atomic i8, ptr %ptr acquire, align 1
   %sext = sext i8 %load to i16
   ret i16 %sext
 }
 
-define i64 @atomic_load_acquire_i8_sext_i64(i8* %ptr) {
+define i64 @atomic_load_acquire_i8_sext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i8_sext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -445,36 +445,36 @@ define i64 @atomic_load_acquire_i8_sext_i64(i8* %ptr) {
 ; MIPS32-NEXT:    sra $2, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $3, $1, 31
-  %load = load atomic i8, i8* %ptr acquire, align 1
+  %load = load atomic i8, ptr %ptr acquire, align 1
   %sext = sext i8 %load to i64
   ret i64 %sext
 }
 
-define i32 @atomic_load_acquire_i8_zext_i32(i8* %ptr) {
+define i32 @atomic_load_acquire_i8_zext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i8_zext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 255
-  %load = load atomic i8, i8* %ptr acquire, align 1
+  %load = load atomic i8, ptr %ptr acquire, align 1
   %zext = zext i8 %load to i32
   ret i32 %zext
 }
 
-define i16 @atomic_load_acquire_i8_zext_i16(i8* %ptr) {
+define i16 @atomic_load_acquire_i8_zext_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i8_zext_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 255
-  %load = load atomic i8, i8* %ptr acquire, align 1
+  %load = load atomic i8, ptr %ptr acquire, align 1
   %zext = zext i8 %load to i16
   ret i16 %zext
 }
 
-define i64 @atomic_load_acquire_i8_zext_i64(i8* %ptr) {
+define i64 @atomic_load_acquire_i8_zext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i8_zext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -482,23 +482,23 @@ define i64 @atomic_load_acquire_i8_zext_i64(i8* %ptr) {
 ; MIPS32-NEXT:    andi $2, $1, 255
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $3, $1, 0
-  %load = load atomic i8, i8* %ptr acquire, align 1
+  %load = load atomic i8, ptr %ptr acquire, align 1
   %zext = zext i8 %load to i64
   ret i64 %zext
 }
 
-define i16 @atomic_load_acquire_i16(i16* %ptr) {
+define i16 @atomic_load_acquire_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $2, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i16, i16* %ptr acquire, align 2
+  %load = load atomic i16, ptr %ptr acquire, align 2
   ret i16 %load
 }
 
-define i32 @atomic_load_acquire_i16_sext_i32(i16* %ptr) {
+define i32 @atomic_load_acquire_i16_sext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i16_sext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
@@ -506,12 +506,12 @@ define i32 @atomic_load_acquire_i16_sext_i32(i16* %ptr) {
 ; MIPS32-NEXT:    sll $1, $1, 16
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 16
-  %load = load atomic i16, i16* %ptr acquire, align 2
+  %load = load atomic i16, ptr %ptr acquire, align 2
   %sext = sext i16 %load to i32
   ret i32 %sext
 }
 
-define i64 @atomic_load_acquire_i16_sext_i64(i16* %ptr) {
+define i64 @atomic_load_acquire_i16_sext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i16_sext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
@@ -520,24 +520,24 @@ define i64 @atomic_load_acquire_i16_sext_i64(i16* %ptr) {
 ; MIPS32-NEXT:    sra $2, $1, 16
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $3, $1, 31
-  %load = load atomic i16, i16* %ptr acquire, align 2
+  %load = load atomic i16, ptr %ptr acquire, align 2
   %sext = sext i16 %load to i64
   ret i64 %sext
 }
 
-define i32 @atomic_load_acquire_i16_zext_i32(i16* %ptr) {
+define i32 @atomic_load_acquire_i16_zext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i16_zext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 65535
-  %load = load atomic i16, i16* %ptr acquire, align 2
+  %load = load atomic i16, ptr %ptr acquire, align 2
   %zext = zext i16 %load to i32
   ret i32 %zext
 }
 
-define i64 @atomic_load_acquire_i16_zext_i64(i16* %ptr) {
+define i64 @atomic_load_acquire_i16_zext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i16_zext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
@@ -545,23 +545,23 @@ define i64 @atomic_load_acquire_i16_zext_i64(i16* %ptr) {
 ; MIPS32-NEXT:    andi $2, $1, 65535
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $3, $1, 0
-  %load = load atomic i16, i16* %ptr acquire, align 2
+  %load = load atomic i16, ptr %ptr acquire, align 2
   %zext = zext i16 %load to i64
   ret i64 %zext
 }
 
-define i32 @atomic_load_acquire_i32(i32* %ptr) {
+define i32 @atomic_load_acquire_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lw $2, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i32, i32* %ptr acquire, align 4
+  %load = load atomic i32, ptr %ptr acquire, align 4
   ret i32 %load
 }
 
-define i64 @atomic_load_acquire_i64(i64* %ptr) {
+define i64 @atomic_load_acquire_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    ldc1 $f0, 0($4)
@@ -569,62 +569,62 @@ define i64 @atomic_load_acquire_i64(i64* %ptr) {
 ; MIPS32-NEXT:    mfc1 $2, $f0
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    mfc1 $3, $f1
-  %load = load atomic i64, i64* %ptr acquire, align 8
+  %load = load atomic i64, ptr %ptr acquire, align 8
   ret i64 %load
 }
 
-define float @atomic_load_acquire_f32(float* %ptr) {
+define float @atomic_load_acquire_f32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_f32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lwc1 $f0, 64($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds float, float* %ptr, i32 16
-  %load = load atomic float, float* %gep acquire, align 4
+  %gep = getelementptr inbounds float, ptr %ptr, i32 16
+  %load = load atomic float, ptr %gep acquire, align 4
   ret float %load
 }
 
-define double @atomic_load_acquire_f64(double* %ptr) {
+define double @atomic_load_acquire_f64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_f64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    ldc1 $f0, 128($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds double, double* %ptr, i32 16
-  %load = load atomic double, double* %gep acquire, align 8
+  %gep = getelementptr inbounds double, ptr %ptr, i32 16
+  %load = load atomic double, ptr %gep acquire, align 8
   ret double %load
 }
 
-define i8* @atomic_load_acquire_p0i8(i8** %ptr) {
+define ptr @atomic_load_acquire_p0i8(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_acquire_p0i8:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lw $2, 64($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds i8*, i8** %ptr, i32 16
-  %load = load atomic i8*, i8** %gep acquire, align 4
-  ret i8* %load
+  %gep = getelementptr inbounds ptr, ptr %ptr, i32 16
+  %load = load atomic ptr, ptr %gep acquire, align 4
+  ret ptr %load
 }
 
 ; --------------------------------------------------------------------
 ; seq_cst
 ; --------------------------------------------------------------------
 
-define i8 @atomic_load_seq_cst_i8(i8* %ptr) {
+define i8 @atomic_load_seq_cst_i8(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i8:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $2, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i8, i8* %ptr seq_cst, align 1
+  %load = load atomic i8, ptr %ptr seq_cst, align 1
   ret i8 %load
 }
 
-define i32 @atomic_load_seq_cst_i8_sext_i32(i8* %ptr) {
+define i32 @atomic_load_seq_cst_i8_sext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i8_sext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -632,12 +632,12 @@ define i32 @atomic_load_seq_cst_i8_sext_i32(i8* %ptr) {
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 24
-  %load = load atomic i8, i8* %ptr seq_cst, align 1
+  %load = load atomic i8, ptr %ptr seq_cst, align 1
   %sext = sext i8 %load to i32
   ret i32 %sext
 }
 
-define i16 @atomic_load_seq_cst_i8_sext_i16(i8* %ptr) {
+define i16 @atomic_load_seq_cst_i8_sext_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i8_sext_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -645,12 +645,12 @@ define i16 @atomic_load_seq_cst_i8_sext_i16(i8* %ptr) {
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 24
-  %load = load atomic i8, i8* %ptr seq_cst, align 1
+  %load = load atomic i8, ptr %ptr seq_cst, align 1
   %sext = sext i8 %load to i16
   ret i16 %sext
 }
 
-define i64 @atomic_load_seq_cst_i8_sext_i64(i8* %ptr) {
+define i64 @atomic_load_seq_cst_i8_sext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i8_sext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -659,36 +659,36 @@ define i64 @atomic_load_seq_cst_i8_sext_i64(i8* %ptr) {
 ; MIPS32-NEXT:    sra $2, $1, 24
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $3, $1, 31
-  %load = load atomic i8, i8* %ptr seq_cst, align 1
+  %load = load atomic i8, ptr %ptr seq_cst, align 1
   %sext = sext i8 %load to i64
   ret i64 %sext
 }
 
-define i32 @atomic_load_seq_cst_i8_zext_i32(i8* %ptr) {
+define i32 @atomic_load_seq_cst_i8_zext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i8_zext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 255
-  %load = load atomic i8, i8* %ptr seq_cst, align 1
+  %load = load atomic i8, ptr %ptr seq_cst, align 1
   %zext = zext i8 %load to i32
   ret i32 %zext
 }
 
-define i16 @atomic_load_seq_cst_i8_zext_i16(i8* %ptr) {
+define i16 @atomic_load_seq_cst_i8_zext_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i8_zext_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 255
-  %load = load atomic i8, i8* %ptr seq_cst, align 1
+  %load = load atomic i8, ptr %ptr seq_cst, align 1
   %zext = zext i8 %load to i16
   ret i16 %zext
 }
 
-define i64 @atomic_load_seq_cst_i8_zext_i64(i8* %ptr) {
+define i64 @atomic_load_seq_cst_i8_zext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i8_zext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lbu $1, 0($4)
@@ -696,23 +696,23 @@ define i64 @atomic_load_seq_cst_i8_zext_i64(i8* %ptr) {
 ; MIPS32-NEXT:    andi $2, $1, 255
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $3, $1, 0
-  %load = load atomic i8, i8* %ptr seq_cst, align 1
+  %load = load atomic i8, ptr %ptr seq_cst, align 1
   %zext = zext i8 %load to i64
   ret i64 %zext
 }
 
-define i16 @atomic_load_seq_cst_i16(i16* %ptr) {
+define i16 @atomic_load_seq_cst_i16(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i16:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $2, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i16, i16* %ptr seq_cst, align 2
+  %load = load atomic i16, ptr %ptr seq_cst, align 2
   ret i16 %load
 }
 
-define i32 @atomic_load_seq_cst_i16_sext_i32(i16* %ptr) {
+define i32 @atomic_load_seq_cst_i16_sext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i16_sext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
@@ -720,12 +720,12 @@ define i32 @atomic_load_seq_cst_i16_sext_i32(i16* %ptr) {
 ; MIPS32-NEXT:    sll $1, $1, 16
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $2, $1, 16
-  %load = load atomic i16, i16* %ptr seq_cst, align 2
+  %load = load atomic i16, ptr %ptr seq_cst, align 2
   %sext = sext i16 %load to i32
   ret i32 %sext
 }
 
-define i64 @atomic_load_seq_cst_i16_sext_i64(i16* %ptr) {
+define i64 @atomic_load_seq_cst_i16_sext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i16_sext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
@@ -734,24 +734,24 @@ define i64 @atomic_load_seq_cst_i16_sext_i64(i16* %ptr) {
 ; MIPS32-NEXT:    sra $2, $1, 16
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    sra $3, $1, 31
-  %load = load atomic i16, i16* %ptr seq_cst, align 2
+  %load = load atomic i16, ptr %ptr seq_cst, align 2
   %sext = sext i16 %load to i64
   ret i64 %sext
 }
 
-define i32 @atomic_load_seq_cst_i16_zext_i32(i16* %ptr) {
+define i32 @atomic_load_seq_cst_i16_zext_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i16_zext_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $2, $1, 65535
-  %load = load atomic i16, i16* %ptr seq_cst, align 2
+  %load = load atomic i16, ptr %ptr seq_cst, align 2
   %zext = zext i16 %load to i32
   ret i32 %zext
 }
 
-define i64 @atomic_load_seq_cst_i16_zext_i64(i16* %ptr) {
+define i64 @atomic_load_seq_cst_i16_zext_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i16_zext_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lhu $1, 0($4)
@@ -759,23 +759,23 @@ define i64 @atomic_load_seq_cst_i16_zext_i64(i16* %ptr) {
 ; MIPS32-NEXT:    andi $2, $1, 65535
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    andi $3, $1, 0
-  %load = load atomic i16, i16* %ptr seq_cst, align 2
+  %load = load atomic i16, ptr %ptr seq_cst, align 2
   %zext = zext i16 %load to i64
   ret i64 %zext
 }
 
-define i32 @atomic_load_seq_cst_i32(i32* %ptr) {
+define i32 @atomic_load_seq_cst_i32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lw $2, 0($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %load = load atomic i32, i32* %ptr seq_cst, align 4
+  %load = load atomic i32, ptr %ptr seq_cst, align 4
   ret i32 %load
 }
 
-define i64 @atomic_load_seq_cst_i64(i64* %ptr) {
+define i64 @atomic_load_seq_cst_i64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_i64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    ldc1 $f0, 0($4)
@@ -783,42 +783,42 @@ define i64 @atomic_load_seq_cst_i64(i64* %ptr) {
 ; MIPS32-NEXT:    mfc1 $2, $f0
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    mfc1 $3, $f1
-  %load = load atomic i64, i64* %ptr seq_cst, align 8
+  %load = load atomic i64, ptr %ptr seq_cst, align 8
   ret i64 %load
 }
 
-define float @atomic_load_seq_cst_f32(float* %ptr) {
+define float @atomic_load_seq_cst_f32(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_f32:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lwc1 $f0, 64($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds float, float* %ptr, i32 16
-  %load = load atomic float, float* %gep seq_cst, align 4
+  %gep = getelementptr inbounds float, ptr %ptr, i32 16
+  %load = load atomic float, ptr %gep seq_cst, align 4
   ret float %load
 }
 
-define double @atomic_load_seq_cst_f64(double* %ptr) {
+define double @atomic_load_seq_cst_f64(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_f64:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    ldc1 $f0, 128($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds double, double* %ptr, i32 16
-  %load = load atomic double, double* %gep seq_cst, align 8
+  %gep = getelementptr inbounds double, ptr %ptr, i32 16
+  %load = load atomic double, ptr %gep seq_cst, align 8
   ret double %load
 }
 
-define i8* @atomic_load_seq_cst_p0i8(i8** %ptr) {
+define ptr @atomic_load_seq_cst_p0i8(ptr %ptr) {
 ; MIPS32-LABEL: atomic_load_seq_cst_p0i8:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    lw $2, 64($4)
 ; MIPS32-NEXT:    sync
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
-  %gep = getelementptr inbounds i8*, i8** %ptr, i32 16
-  %load = load atomic i8*, i8** %gep seq_cst, align 4
-  ret i8* %load
+  %gep = getelementptr inbounds ptr, ptr %ptr, i32 16
+  %load = load atomic ptr, ptr %gep seq_cst, align 4
+  ret ptr %load
 }
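
The rewrite in this file is purely mechanical: every pointee-typed pointer
(i8*, i16*, i32*, i64*, float*, double*, i8**) becomes the opaque ptr type,
while the value type stays spelled out on the load or getelementptr itself,
so the MIPS32 output (including the sync barrier emitted for the acquire and
seq_cst orderings) is unchanged. A minimal before/after sketch, with a
hypothetical function name:

; before (typed pointers)
define i8 @example(i8* %p) {
  %v = load atomic i8, i8* %p seq_cst, align 1
  ret i8 %v
}

; after (opaque pointers): the value type now lives only on the load
define i8 @example(ptr %p) {
  %v = load atomic i8, ptr %p seq_cst, align 1
  ret i8 %v
}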

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_split_because_of_memsize_or_align.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_split_because_of_memsize_or_align.ll
index a2afbf1c637ec..6d7a8306c3992 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_split_because_of_memsize_or_align.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_split_because_of_memsize_or_align.ll
@@ -28,7 +28,7 @@
 @i64_align4 = common global i64 0, align 4
 @i64_align8 = common global i64 0, align 8
 
-define i32 @load3align1(%struct.MemSize3_Align1* %S) {
+define i32 @load3align1(ptr %S) {
 ; MIPS32-LABEL: load3align1:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    # implicit-def: $at
@@ -48,13 +48,12 @@ define i32 @load3align1(%struct.MemSize3_Align1* %S) {
 ; MIPS32R6-NEXT:    and $2, $1, $2
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize3_Align1* %S to i24*
-  %bf.load = load i24, i24* %0, align 1
+  %bf.load = load i24, ptr %S, align 1
   %bf.cast = zext i24 %bf.load to i32
   ret i32 %bf.cast
 }
 
-define i32 @load3align2(%struct.MemSize3_Align2* %S) {
+define i32 @load3align2(ptr %S) {
 ; MIPS32-LABEL: load3align2:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    # implicit-def: $at
@@ -74,13 +73,12 @@ define i32 @load3align2(%struct.MemSize3_Align2* %S) {
 ; MIPS32R6-NEXT:    and $2, $1, $2
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize3_Align2* %S to i24*
-  %bf.load = load i24, i24* %0, align 2
+  %bf.load = load i24, ptr %S, align 2
   %bf.cast = zext i24 %bf.load to i32
   ret i32 %bf.cast
 }
 
-define i32 @load3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
+define i32 @load3align4(ptr %S, i32 signext %a) {
 ; MIPS32-LABEL: load3align4:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $1, 0($4)
@@ -98,13 +96,12 @@ define i32 @load3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
 ; MIPS32R6-NEXT:    and $2, $1, $2
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize3_Align4* %S to i24*
-  %bf.load = load i24, i24* %0, align 4
+  %bf.load = load i24, ptr %S, align 4
   %bf.cast = zext i24 %bf.load to i32
   ret i32 %bf.cast
 }
 
-define i32 @load3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
+define i32 @load3align8(ptr %S, i32 signext %a) {
 ; MIPS32-LABEL: load3align8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $1, 0($4)
@@ -122,13 +119,12 @@ define i32 @load3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
 ; MIPS32R6-NEXT:    and $2, $1, $2
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize3_Align8* %S to i24*
-  %bf.load = load i24, i24* %0, align 8
+  %bf.load = load i24, ptr %S, align 8
   %bf.cast = zext i24 %bf.load to i32
   ret i32 %bf.cast
 }
 
-define i64 @load5align1(%struct.MemSize5_Align1* %S) {
+define i64 @load5align1(ptr %S) {
 ; MIPS32-LABEL: load5align1:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    # implicit-def: $v0
@@ -150,13 +146,12 @@ define i64 @load5align1(%struct.MemSize5_Align1* %S) {
 ; MIPS32R6-NEXT:    andi $3, $1, 255
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize5_Align1* %S to i40*
-  %bf.load = load i40, i40* %0, align 1
+  %bf.load = load i40, ptr %S, align 1
   %bf.cast = zext i40 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load5align2(%struct.MemSize5_Align2* %S) {
+define i64 @load5align2(ptr %S) {
 ; MIPS32-LABEL: load5align2:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    # implicit-def: $v0
@@ -178,13 +173,12 @@ define i64 @load5align2(%struct.MemSize5_Align2* %S) {
 ; MIPS32R6-NEXT:    andi $3, $1, 255
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize5_Align2* %S to i40*
-  %bf.load = load i40, i40* %0, align 2
+  %bf.load = load i40, ptr %S, align 2
   %bf.cast = zext i40 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load5align4(%struct.MemSize5_Align4* %S) {
+define i64 @load5align4(ptr %S) {
 ; MIPS32-LABEL: load5align4:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -204,13 +198,12 @@ define i64 @load5align4(%struct.MemSize5_Align4* %S) {
 ; MIPS32R6-NEXT:    andi $3, $1, 255
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize5_Align4* %S to i40*
-  %bf.load = load i40, i40* %0, align 4
+  %bf.load = load i40, ptr %S, align 4
   %bf.cast = zext i40 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load5align8(%struct.MemSize5_Align8* %S) {
+define i64 @load5align8(ptr %S) {
 ; MIPS32-LABEL: load5align8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -230,13 +223,12 @@ define i64 @load5align8(%struct.MemSize5_Align8* %S) {
 ; MIPS32R6-NEXT:    andi $3, $1, 255
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize5_Align8* %S to i40*
-  %bf.load = load i40, i40* %0, align 8
+  %bf.load = load i40, ptr %S, align 8
   %bf.cast = zext i40 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load6align1(%struct.MemSize6_Align1* %S) {
+define i64 @load6align1(ptr %S) {
 ; MIPS32-LABEL: load6align1:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    # implicit-def: $v0
@@ -260,13 +252,12 @@ define i64 @load6align1(%struct.MemSize6_Align1* %S) {
 ; MIPS32R6-NEXT:    andi $3, $1, 65535
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize6_Align1* %S to i48*
-  %bf.load = load i48, i48* %0, align 1
+  %bf.load = load i48, ptr %S, align 1
   %bf.cast = zext i48 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load6align2(%struct.MemSize6_Align2* %S) {
+define i64 @load6align2(ptr %S) {
 ; MIPS32-LABEL: load6align2:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    # implicit-def: $v0
@@ -288,13 +279,12 @@ define i64 @load6align2(%struct.MemSize6_Align2* %S) {
 ; MIPS32R6-NEXT:    andi $3, $1, 65535
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize6_Align2* %S to i48*
-  %bf.load = load i48, i48* %0, align 2
+  %bf.load = load i48, ptr %S, align 2
   %bf.cast = zext i48 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load6align4(%struct.MemSize6_Align4* %S) {
+define i64 @load6align4(ptr %S) {
 ; MIPS32-LABEL: load6align4:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -314,13 +304,12 @@ define i64 @load6align4(%struct.MemSize6_Align4* %S) {
 ; MIPS32R6-NEXT:    andi $3, $1, 65535
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize6_Align4* %S to i48*
-  %bf.load = load i48, i48* %0, align 4
+  %bf.load = load i48, ptr %S, align 4
   %bf.cast = zext i48 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load6align8(%struct.MemSize6_Align8* %S) {
+define i64 @load6align8(ptr %S) {
 ; MIPS32-LABEL: load6align8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -340,13 +329,12 @@ define i64 @load6align8(%struct.MemSize6_Align8* %S) {
 ; MIPS32R6-NEXT:    andi $3, $1, 65535
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize6_Align8* %S to i48*
-  %bf.load = load i48, i48* %0, align 8
+  %bf.load = load i48, ptr %S, align 8
   %bf.cast = zext i48 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load7align1(%struct.MemSize7_Align1* %S) {
+define i64 @load7align1(ptr %S) {
 ; MIPS32-LABEL: load7align1:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    # implicit-def: $v0
@@ -374,13 +362,12 @@ define i64 @load7align1(%struct.MemSize7_Align1* %S) {
 ; MIPS32R6-NEXT:    and $3, $1, $3
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize7_Align1* %S to i56*
-  %bf.load = load i56, i56* %0, align 1
+  %bf.load = load i56, ptr %S, align 1
   %bf.cast = zext i56 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load7align2(%struct.MemSize7_Align2* %S) {
+define i64 @load7align2(ptr %S) {
 ; MIPS32-LABEL: load7align2:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    # implicit-def: $v0
@@ -408,13 +395,12 @@ define i64 @load7align2(%struct.MemSize7_Align2* %S) {
 ; MIPS32R6-NEXT:    and $3, $1, $3
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize7_Align2* %S to i56*
-  %bf.load = load i56, i56* %0, align 2
+  %bf.load = load i56, ptr %S, align 2
   %bf.cast = zext i56 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load7align4(%struct.MemSize7_Align4* %S) {
+define i64 @load7align4(ptr %S) {
 ; MIPS32-LABEL: load7align4:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -438,13 +424,12 @@ define i64 @load7align4(%struct.MemSize7_Align4* %S) {
 ; MIPS32R6-NEXT:    and $3, $1, $3
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize7_Align4* %S to i56*
-  %bf.load = load i56, i56* %0, align 4
+  %bf.load = load i56, ptr %S, align 4
   %bf.cast = zext i56 %bf.load to i64
   ret i64 %bf.cast
 }
 
-define i64 @load7align8(%struct.MemSize7_Align8* %S) {
+define i64 @load7align8(ptr %S) {
 ; MIPS32-LABEL: load7align8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -468,8 +453,7 @@ define i64 @load7align8(%struct.MemSize7_Align8* %S) {
 ; MIPS32R6-NEXT:    and $3, $1, $3
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize7_Align8* %S to i56*
-  %bf.load = load i56, i56* %0, align 8
+  %bf.load = load i56, ptr %S, align 8
   %bf.cast = zext i56 %bf.load to i64
   ret i64 %bf.cast
 }
@@ -497,7 +481,7 @@ define double @load_double_align1() {
 ; MIPS32R6-NEXT:    ldc1 $f0, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load double, double* @double_align1, align 1
+  %0 = load double, ptr @double_align1, align 1
   ret double %0
 }
 
@@ -524,7 +508,7 @@ define double @load_double_align2() {
 ; MIPS32R6-NEXT:    ldc1 $f0, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load double, double* @double_align2, align 2
+  %0 = load double, ptr @double_align2, align 2
   ret double %0
 }
 
@@ -547,7 +531,7 @@ define double @load_double_align4() {
 ; MIPS32R6-NEXT:    ldc1 $f0, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load double, double* @double_align4, align 4
+  %0 = load double, ptr @double_align4, align 4
   ret double %0
 }
 
@@ -567,7 +551,7 @@ define double @load_double_align8() {
 ; MIPS32R6-NEXT:    ldc1 $f0, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load double, double* @double_align8, align 8
+  %0 = load double, ptr @double_align8, align 8
   ret double %0
 }
 
@@ -593,7 +577,7 @@ define i64 @load_i64_align1() {
 ; MIPS32R6-NEXT:    lw $3, 4($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load i64, i64* @i64_align1, align 1
+  %0 = load i64, ptr @i64_align1, align 1
   ret i64 %0
 }
 
@@ -619,7 +603,7 @@ define i64 @load_i64_align2() {
 ; MIPS32R6-NEXT:    lw $3, 4($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load i64, i64* @i64_align2, align 2
+  %0 = load i64, ptr @i64_align2, align 2
   ret i64 %0
 }
 
@@ -641,7 +625,7 @@ define i64 @load_i64_align4() {
 ; MIPS32R6-NEXT:    lw $3, 4($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load i64, i64* @i64_align4, align 4
+  %0 = load i64, ptr @i64_align4, align 4
   ret i64 %0
 }
 
@@ -663,6 +647,6 @@ define i64 @load_i64_align8() {
 ; MIPS32R6-NEXT:    lw $3, 4($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = load i64, i64* @i64_align8, align 8
+  %0 = load i64, ptr @i64_align8, align 8
   ret i64 %0
 }
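
Besides the signature rewrite, the hunks in this file also delete the
pointer-to-pointer bitcasts: under opaque pointers a bitcast between two
pointer types carries no information, so the narrow iN load can be issued
directly on the incoming ptr. The recurring pattern, taken from load3align1
above:

; before
  %0 = bitcast %struct.MemSize3_Align1* %S to i24*
  %bf.load = load i24, i24* %0, align 1
; after
  %bf.load = load i24, ptr %S, align 1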

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_fold.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_fold.ll
index 8eed2cc54c981..721ea07108fe1 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_fold.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_fold.ll
@@ -1,31 +1,31 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define zeroext i8 @_16_bit_positive_offset(i8* %a) {
+define zeroext i8 @_16_bit_positive_offset(ptr %a) {
 ; MIPS32-LABEL: _16_bit_positive_offset:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lbu $2, 32767($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %arrayidx = getelementptr inbounds i8, i8* %a, i32 32767
-  %0 = load i8, i8* %arrayidx
+  %arrayidx = getelementptr inbounds i8, ptr %a, i32 32767
+  %0 = load i8, ptr %arrayidx
   ret i8 %0
 }
 
-define void @_16_bit_negative_offset(i8 %val, i8* %a) {
+define void @_16_bit_negative_offset(i8 %val, ptr %a) {
 ; MIPS32-LABEL: _16_bit_negative_offset:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sb $4, -32768($5)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %arrayidx = getelementptr inbounds i8, i8* %a, i32 -32768
-  store i8 %val, i8* %arrayidx
+  %arrayidx = getelementptr inbounds i8, ptr %a, i32 -32768
+  store i8 %val, ptr %arrayidx
   ret void
 }
 
-define void @_large_positive_offset(i8 %val, i8* %a) {
+define void @_large_positive_offset(i8 %val, ptr %a) {
 ; MIPS32-LABEL: _large_positive_offset:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ori $1, $zero, 32768
@@ -34,12 +34,12 @@ define void @_large_positive_offset(i8 %val, i8* %a) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %arrayidx = getelementptr inbounds i8, i8* %a, i32 32768
-  store i8 %val, i8* %arrayidx
+  %arrayidx = getelementptr inbounds i8, ptr %a, i32 32768
+  store i8 %val, ptr %arrayidx
   ret void
 }
 
-define signext i8 @_large_negative_offset(i8* %a) {
+define signext i8 @_large_negative_offset(ptr %a) {
 ; MIPS32-LABEL: _large_negative_offset:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lui $1, 65535
@@ -49,56 +49,56 @@ define signext i8 @_large_negative_offset(i8* %a) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %arrayidx = getelementptr inbounds i8, i8* %a, i32 -32769
-  %0 = load i8, i8* %arrayidx
+  %arrayidx = getelementptr inbounds i8, ptr %a, i32 -32769
+  %0 = load i8, ptr %arrayidx
   ret i8 %0
 }
 
-define float @fold_f32_load(float* %a) {
+define float @fold_f32_load(ptr %a) {
 ; MIPS32-LABEL: fold_f32_load:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lwc1 $f0, 40($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %arrayidx = getelementptr inbounds float, float* %a, i32 10
-  %0 = load float, float* %arrayidx
+  %arrayidx = getelementptr inbounds float, ptr %a, i32 10
+  %0 = load float, ptr %arrayidx
   ret float %0
 }
 
-define void @fold_f64_store(double %val, double* %a) {
+define void @fold_f64_store(double %val, ptr %a) {
 ; MIPS32-LABEL: fold_f64_store:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sdc1 $f12, -80($6)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %arrayidx = getelementptr inbounds double, double* %a, i32 -10
-  store double %val, double* %arrayidx
+  %arrayidx = getelementptr inbounds double, ptr %a, i32 -10
+  store double %val, ptr %arrayidx
   ret void
 }
 
-define i16 @fold_i16_load(i16* %a) {
+define i16 @fold_i16_load(ptr %a) {
 ; MIPS32-LABEL: fold_i16_load:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lhu $2, -20($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %arrayidx = getelementptr inbounds i16, i16* %a, i32 -10
-  %0 = load i16, i16* %arrayidx
+  %arrayidx = getelementptr inbounds i16, ptr %a, i32 -10
+  %0 = load i16, ptr %arrayidx
   ret i16 %0
 }
 
-define void @fold_i32_store(i32 %val, i32* %a) {
+define void @fold_i32_store(i32 %val, ptr %a) {
 ; MIPS32-LABEL: fold_i32_store:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sw $4, 40($5)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 10
-  store i32 %val, i32* %arrayidx
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 10
+  store i32 %val, ptr %arrayidx
   ret void
 }
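
The offsets in the check lines above follow directly from the GEP
arithmetic: a constant index is scaled by the element size and, when the
resulting byte offset fits in a signed 16-bit immediate, folded into the
load or store. From fold_f32_load above:

  %arrayidx = getelementptr inbounds float, ptr %a, i32 10   ; 10 * 4 = 40 bytes
  %0 = load float, ptr %arrayidx                             ; folds to lwc1 $f0, 40($4)

Offsets outside [-32768, 32767], exercised by the _large_*_offset tests,
are instead built in a register before the access.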
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_vec.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_vec.ll
index 6da35aa47f0a4..c09353f3e1ff7 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_vec.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/load_store_vec.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=msa,+fp64 -mattr=nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
-define void @load_store_v16i8(<16 x i8>* %a, <16 x i8>* %b) {
+define void @load_store_v16i8(ptr %a, ptr %b) {
 ; P5600-LABEL: load_store_v16i8:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($5)
@@ -9,12 +9,12 @@ define void @load_store_v16i8(<16 x i8>* %a, <16 x i8>* %b) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %b, align 16
-  store <16 x i8> %0, <16 x i8>* %a, align 16
+  %0 = load <16 x i8>, ptr %b, align 16
+  store <16 x i8> %0, ptr %a, align 16
   ret void
 }
 
-define void @load_store_v8i16(<8 x i16>* %a, <8 x i16>* %b) {
+define void @load_store_v8i16(ptr %a, ptr %b) {
 ; P5600-LABEL: load_store_v8i16:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($5)
@@ -22,12 +22,12 @@ define void @load_store_v8i16(<8 x i16>* %a, <8 x i16>* %b) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %b, align 16
-  store <8 x i16> %0, <8 x i16>* %a, align 16
+  %0 = load <8 x i16>, ptr %b, align 16
+  store <8 x i16> %0, ptr %a, align 16
   ret void
 }
 
-define void @load_store_v4i32(<4 x i32>* %a, <4 x i32>* %b) {
+define void @load_store_v4i32(ptr %a, ptr %b) {
 ; P5600-LABEL: load_store_v4i32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($5)
@@ -35,12 +35,12 @@ define void @load_store_v4i32(<4 x i32>* %a, <4 x i32>* %b) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %b, align 16
-  store <4 x i32> %0, <4 x i32>* %a, align 16
+  %0 = load <4 x i32>, ptr %b, align 16
+  store <4 x i32> %0, ptr %a, align 16
   ret void
 }
 
-define void @load_store_v2i64(<2 x i64>* %a, <2 x i64>* %b) {
+define void @load_store_v2i64(ptr %a, ptr %b) {
 ; P5600-LABEL: load_store_v2i64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($5)
@@ -48,12 +48,12 @@ define void @load_store_v2i64(<2 x i64>* %a, <2 x i64>* %b) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %b, align 16
-  store <2 x i64> %0, <2 x i64>* %a, align 16
+  %0 = load <2 x i64>, ptr %b, align 16
+  store <2 x i64> %0, ptr %a, align 16
   ret void
 }
 
-define void @load_store_v4f32(<4 x float>* %a, <4 x float>* %b) {
+define void @load_store_v4f32(ptr %a, ptr %b) {
 ; P5600-LABEL: load_store_v4f32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($5)
@@ -61,12 +61,12 @@ define void @load_store_v4f32(<4 x float>* %a, <4 x float>* %b) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float>* %b, align 16
-  store <4 x float> %0, <4 x float>* %a, align 16
+  %0 = load <4 x float>, ptr %b, align 16
+  store <4 x float> %0, ptr %a, align 16
   ret void
 }
 
-define void @load_store_v2f64(<2 x double>* %a, <2 x double>* %b) {
+define void @load_store_v2f64(ptr %a, ptr %b) {
 ; P5600-LABEL: load_store_v2f64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($5)
@@ -74,7 +74,7 @@ define void @load_store_v2f64(<2 x double>* %a, <2 x double>* %b) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double>* %b, align 16
-  store <2 x double> %0, <2 x double>* %a, align 16
+  %0 = load <2 x double>, ptr %b, align 16
+  store <2 x double> %0, ptr %a, align 16
   ret void
 }
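
Each of the vector tests above has the same copy shape: the 128-bit value is
loaded with the MSA instruction matching the element width (ld.b, ld.h,
ld.w, ld.d) and written back with the corresponding st.* form. A minimal
sketch with a hypothetical function name, assuming the same P5600/MSA RUN
line as this file:

define void @copy_v4i32(ptr %dst, ptr %src) {
entry:
  %v = load <4 x i32>, ptr %src, align 16    ; selects ld.w
  store <4 x i32> %v, ptr %dst, align 16     ; selects st.w
  ret void
}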

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll
index 0ff034bec1c50..4f4bac54e7287 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define void @long_chain_ambiguous_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i32* %a, i32* %b, i32* %c, i32* %result) {
+define void @long_chain_ambiguous_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
 ; MIPS32-LABEL: long_chain_ambiguous_i32_in_gpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -48
@@ -141,15 +141,15 @@ pre.PHI.1.0:
   br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
 b.PHI.1.0:
-  %phi1.0 = load i32, i32* %a
+  %phi1.0 = load i32, ptr %a
   br label %b.PHI.1
 
 b.PHI.1.1:
-  %phi1.1 = load i32, i32* %b
+  %phi1.1 = load i32, ptr %b
   br label %b.PHI.1
 
 b.PHI.1.2:
-  %phi1.2 = load i32, i32* %c
+  %phi1.2 = load i32, ptr %c
   br label %b.PHI.1
 
 b.PHI.1:
@@ -157,18 +157,18 @@ b.PHI.1:
   br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
 b.PHI.1.end:
-  store i32 %phi1, i32* %result
+  store i32 %phi1, ptr %result
   ret void
 
 pre.PHI.2:
   br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
 b.PHI.2.0:
-  %phi2.0 = load i32, i32* %a
+  %phi2.0 = load i32, ptr %a
   br label %b.PHI.2
 
 b.PHI.2.1:
-  %phi2.1 = load i32, i32* %b
+  %phi2.1 = load i32, ptr %b
   br label %b.PHI.2
 
 b.PHI.2:
@@ -176,7 +176,7 @@ b.PHI.2:
    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
 b.PHI.2.end:
-  store i32 %phi2, i32* %result
+  store i32 %phi2, ptr %result
   ret void
 
 b.PHI.3:
@@ -184,13 +184,13 @@ b.PHI.3:
   %phi4 = phi i32 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
   %sel_1.2 = select i1 %cnd2, i32 %phi3, i32 %phi4
   %sel_3_1.2 = select i1 %cnd1, i32 %sel_1.2, i32 %phi3
-  store i32 %sel_3_1.2, i32* %result
-  store i32 %phi3, i32* %result
+  store i32 %sel_3_1.2, ptr %result
+  store i32 %phi3, ptr %result
   ret void
 
 }
 
-define void @long_chain_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i32* %a, i32* %b, i32* %c, i32* %result) {
+define void @long_chain_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
 ; MIPS32-LABEL: long_chain_i32_in_gpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -56
@@ -336,15 +336,15 @@ pre.PHI.1.0:
   br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
 b.PHI.1.0:
-  %phi1.0 = load i32, i32* %a
+  %phi1.0 = load i32, ptr %a
   br label %b.PHI.1
 
 b.PHI.1.1:
-  %phi1.1 = load i32, i32* %b
+  %phi1.1 = load i32, ptr %b
   br label %b.PHI.1
 
 b.PHI.1.2:
-  %phi1.2 = load i32, i32* %c
+  %phi1.2 = load i32, ptr %c
   br label %b.PHI.1
 
 b.PHI.1:
@@ -352,18 +352,18 @@ b.PHI.1:
   br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
 b.PHI.1.end:
-  store i32 %phi1, i32* %result
+  store i32 %phi1, ptr %result
   ret void
 
 pre.PHI.2:
   br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
 b.PHI.2.0:
-  %phi2.0 = load i32, i32* %a
+  %phi2.0 = load i32, ptr %a
   br label %b.PHI.2
 
 b.PHI.2.1:
-  %phi2.1 = load i32, i32* %b
+  %phi2.1 = load i32, ptr %b
   br label %b.PHI.2
 
 b.PHI.2:
@@ -371,7 +371,7 @@ b.PHI.2:
    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
 b.PHI.2.end:
-  store i32 %phi2, i32* %result
+  store i32 %phi2, ptr %result
   ret void
 
 b.PHI.3:
@@ -379,12 +379,12 @@ b.PHI.3:
   %phi4 = phi i32 [ %phi2, %b.PHI.2], [ 0, %b.PHI.1 ]
   %sel_1.2 = select i1 %cnd2, i32 %phi3, i32 %phi4
   %sel_3_1.2 = select i1 %cnd1, i32 %sel_1.2, i32 %phi3
-  store i32 %sel_3_1.2, i32* %result
-  store i32 %phi3, i32* %result
+  store i32 %sel_3_1.2, ptr %result
+  store i32 %phi3, ptr %result
   ret void
 }
 
-define void @long_chain_ambiguous_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, float* %a, float* %b, float* %c, float* %result) {
+define void @long_chain_ambiguous_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
 ; MIPS32-LABEL: long_chain_ambiguous_float_in_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -48
@@ -524,15 +524,15 @@ pre.PHI.1.0:
   br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
 b.PHI.1.0:
-  %phi1.0 = load float, float* %a
+  %phi1.0 = load float, ptr %a
   br label %b.PHI.1
 
 b.PHI.1.1:
-  %phi1.1 = load float, float* %b
+  %phi1.1 = load float, ptr %b
   br label %b.PHI.1
 
 b.PHI.1.2:
-  %phi1.2 = load float, float* %c
+  %phi1.2 = load float, ptr %c
   br label %b.PHI.1
 
 b.PHI.1:
@@ -540,18 +540,18 @@ b.PHI.1:
   br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
 b.PHI.1.end:
-  store float %phi1, float* %result
+  store float %phi1, ptr %result
   ret void
 
 pre.PHI.2:
   br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
 b.PHI.2.0:
-  %phi2.0 = load float, float* %a
+  %phi2.0 = load float, ptr %a
   br label %b.PHI.2
 
 b.PHI.2.1:
-  %phi2.1 = load float, float* %b
+  %phi2.1 = load float, ptr %b
   br label %b.PHI.2
 
 b.PHI.2:
@@ -559,7 +559,7 @@ b.PHI.2:
    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
 b.PHI.2.end:
-  store float %phi2, float* %result
+  store float %phi2, ptr %result
   ret void
 
 b.PHI.3:
@@ -567,13 +567,13 @@ b.PHI.3:
   %phi4 = phi float [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
   %sel_1.2 = select i1 %cnd2, float %phi3, float %phi4
   %sel_3_1.2 = select i1 %cnd1, float %sel_1.2, float %phi3
-  store float %sel_3_1.2, float* %result
-  store float %phi3, float* %result
+  store float %sel_3_1.2, ptr %result
+  store float %phi3, ptr %result
   ret void
 }
 
 
-define void @long_chain_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, float* %a, float* %b, float* %c, float* %result) {
+define void @long_chain_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
 ; MIPS32-LABEL: long_chain_float_in_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -56
@@ -720,15 +720,15 @@ pre.PHI.1.0:
   br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
 b.PHI.1.0:
-  %phi1.0 = load float, float* %a
+  %phi1.0 = load float, ptr %a
   br label %b.PHI.1
 
 b.PHI.1.1:
-  %phi1.1 = load float, float* %b
+  %phi1.1 = load float, ptr %b
   br label %b.PHI.1
 
 b.PHI.1.2:
-  %phi1.2 = load float, float* %c
+  %phi1.2 = load float, ptr %c
   br label %b.PHI.1
 
 b.PHI.1:
@@ -736,18 +736,18 @@ b.PHI.1:
   br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
 b.PHI.1.end:
-  store float %phi1, float* %result
+  store float %phi1, ptr %result
   ret void
 
 pre.PHI.2:
   br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
 b.PHI.2.0:
-  %phi2.0 = load float, float* %a
+  %phi2.0 = load float, ptr %a
   br label %b.PHI.2
 
 b.PHI.2.1:
-  %phi2.1 = load float, float* %b
+  %phi2.1 = load float, ptr %b
   br label %b.PHI.2
 
 b.PHI.2:
@@ -755,7 +755,7 @@ b.PHI.2:
    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
 b.PHI.2.end:
-  store float %phi2, float* %result
+  store float %phi2, ptr %result
   ret void
 
 b.PHI.3:
@@ -763,8 +763,8 @@ b.PHI.3:
   %phi4 = phi float [ %phi2, %b.PHI.2], [ 0.0, %b.PHI.1 ]
   %sel_1.2 = select i1 %cnd2, float %phi3, float %phi4
   %sel_3_1.2 = select i1 %cnd1, float %sel_1.2, float %phi3
-  store float %sel_3_1.2, float* %result
-  store float %phi3, float* %result
+  store float %sel_3_1.2, ptr %result
+  store float %phi3, ptr %result
   ret void
 }
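
The long_chain_ambiguous_* tests stress GlobalISel register-bank selection:
the loaded values flow only through phi, select, and store, none of which by
itself forces GPR versus FPR for an s32 value, so the selector has to commit
to one bank for the whole chain. A reduced sketch of that shape (hypothetical
function, a two-block version of the tests above):

define void @ambiguous_chain(i1 %cnd, ptr %a, ptr %b, ptr %result) {
entry:
  br i1 %cnd, label %t, label %f
t:
  %x = load i32, ptr %a
  br label %merge
f:
  %y = load i32, ptr %b
  br label %merge
merge:
  %phi = phi i32 [ %x, %t ], [ %y, %f ]
  store i32 %phi, ptr %result
  ret void
}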
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll
index 7650194d30b1d..5697aabd03647 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
 ; MIPS32-LABEL: long_chain_ambiguous_i64_in_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -72
@@ -141,15 +141,15 @@ pre.PHI.1.0:
   br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
 b.PHI.1.0:
-  %phi1.0 = load i64, i64* %a
+  %phi1.0 = load i64, ptr %a
   br label %b.PHI.1
 
 b.PHI.1.1:
-  %phi1.1 = load i64, i64* %b
+  %phi1.1 = load i64, ptr %b
   br label %b.PHI.1
 
 b.PHI.1.2:
-  %phi1.2 = load i64, i64* %c
+  %phi1.2 = load i64, ptr %c
   br label %b.PHI.1
 
 b.PHI.1:
@@ -157,18 +157,18 @@ b.PHI.1:
   br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
 b.PHI.1.end:
-  store i64 %phi1, i64* %result
+  store i64 %phi1, ptr %result
   ret void
 
 pre.PHI.2:
   br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
 b.PHI.2.0:
-  %phi2.0 = load i64, i64* %a
+  %phi2.0 = load i64, ptr %a
   br label %b.PHI.2
 
 b.PHI.2.1:
-  %phi2.1 = load i64, i64* %b
+  %phi2.1 = load i64, ptr %b
   br label %b.PHI.2
 
 b.PHI.2:
@@ -176,7 +176,7 @@ b.PHI.2:
    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
 b.PHI.2.end:
-  store i64 %phi2, i64* %result
+  store i64 %phi2, ptr %result
   ret void
 
 b.PHI.3:
@@ -184,13 +184,13 @@ b.PHI.3:
   %phi4 = phi i64 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
   %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
   %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
-  store i64 %sel_3_1.2, i64* %result
-  store i64 %phi3, i64* %result
+  store i64 %sel_3_1.2, ptr %result
+  store i64 %phi3, ptr %result
   ret void
 
 }
 
-define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
 ; MIPS32-LABEL: long_chain_i64_in_gpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -80
@@ -367,15 +367,15 @@ pre.PHI.1.0:
   br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
 b.PHI.1.0:
-  %phi1.0 = load i64, i64* %a
+  %phi1.0 = load i64, ptr %a
   br label %b.PHI.1
 
 b.PHI.1.1:
-  %phi1.1 = load i64, i64* %b
+  %phi1.1 = load i64, ptr %b
   br label %b.PHI.1
 
 b.PHI.1.2:
-  %phi1.2 = load i64, i64* %c
+  %phi1.2 = load i64, ptr %c
   br label %b.PHI.1
 
 b.PHI.1:
@@ -383,18 +383,18 @@ b.PHI.1:
   br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
 b.PHI.1.end:
-  store i64 %phi1, i64* %result
+  store i64 %phi1, ptr %result
   ret void
 
 pre.PHI.2:
   br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
 b.PHI.2.0:
-  %phi2.0 = load i64, i64* %a
+  %phi2.0 = load i64, ptr %a
   br label %b.PHI.2
 
 b.PHI.2.1:
-  %phi2.1 = load i64, i64* %b
+  %phi2.1 = load i64, ptr %b
   br label %b.PHI.2
 
 b.PHI.2:
@@ -402,7 +402,7 @@ b.PHI.2:
    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
 b.PHI.2.end:
-  store i64 %phi2, i64* %result
+  store i64 %phi2, ptr %result
   ret void
 
 b.PHI.3:
@@ -410,12 +410,12 @@ b.PHI.3:
   %phi4 = phi i64 [ %phi2, %b.PHI.2], [ 0, %b.PHI.1 ]
   %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
   %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
-  store i64 %sel_3_1.2, i64* %result
-  store i64 %phi3, i64* %result
+  store i64 %sel_3_1.2, ptr %result
+  store i64 %phi3, ptr %result
   ret void
 }
 
-define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
 ; MIPS32-LABEL: long_chain_ambiguous_double_in_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -72
@@ -555,15 +555,15 @@ pre.PHI.1.0:
   br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
 b.PHI.1.0:
-  %phi1.0 = load double, double* %a
+  %phi1.0 = load double, ptr %a
   br label %b.PHI.1
 
 b.PHI.1.1:
-  %phi1.1 = load double, double* %b
+  %phi1.1 = load double, ptr %b
   br label %b.PHI.1
 
 b.PHI.1.2:
-  %phi1.2 = load double, double* %c
+  %phi1.2 = load double, ptr %c
   br label %b.PHI.1
 
 b.PHI.1:
@@ -571,18 +571,18 @@ b.PHI.1:
   br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
 b.PHI.1.end:
-  store double %phi1, double* %result
+  store double %phi1, ptr %result
   ret void
 
 pre.PHI.2:
   br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
 b.PHI.2.0:
-  %phi2.0 = load double, double* %a
+  %phi2.0 = load double, ptr %a
   br label %b.PHI.2
 
 b.PHI.2.1:
-  %phi2.1 = load double, double* %b
+  %phi2.1 = load double, ptr %b
   br label %b.PHI.2
 
 b.PHI.2:
@@ -590,7 +590,7 @@ b.PHI.2:
    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
 b.PHI.2.end:
-  store double %phi2, double* %result
+  store double %phi2, ptr %result
   ret void
 
 b.PHI.3:
@@ -598,13 +598,13 @@ b.PHI.3:
   %phi4 = phi double [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
   %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
   %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
-  store double %sel_3_1.2, double* %result
-  store double %phi3, double* %result
+  store double %sel_3_1.2, ptr %result
+  store double %phi3, ptr %result
   ret void
 }
 
 
-define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) {
 ; MIPS32-LABEL: long_chain_double_in_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -88
@@ -753,15 +753,15 @@ pre.PHI.1.0:
   br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
 
 b.PHI.1.0:
-  %phi1.0 = load double, double* %a
+  %phi1.0 = load double, ptr %a
   br label %b.PHI.1
 
 b.PHI.1.1:
-  %phi1.1 = load double, double* %b
+  %phi1.1 = load double, ptr %b
   br label %b.PHI.1
 
 b.PHI.1.2:
-  %phi1.2 = load double, double* %c
+  %phi1.2 = load double, ptr %c
   br label %b.PHI.1
 
 b.PHI.1:
@@ -769,18 +769,18 @@ b.PHI.1:
   br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
 
 b.PHI.1.end:
-  store double %phi1, double* %result
+  store double %phi1, ptr %result
   ret void
 
 pre.PHI.2:
   br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
 
 b.PHI.2.0:
-  %phi2.0 = load double, double* %a
+  %phi2.0 = load double, ptr %a
   br label %b.PHI.2
 
 b.PHI.2.1:
-  %phi2.1 = load double, double* %b
+  %phi2.1 = load double, ptr %b
   br label %b.PHI.2
 
 b.PHI.2:
@@ -788,7 +788,7 @@ b.PHI.2:
    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
 
 b.PHI.2.end:
-  store double %phi2, double* %result
+  store double %phi2, ptr %result
   ret void
 
 b.PHI.3:
@@ -796,8 +796,8 @@ b.PHI.3:
   %phi4 = phi double [ %phi2, %b.PHI.2], [ 0.0, %b.PHI.1 ]
   %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
   %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
-  store double %sel_3_1.2, double* %result
-  store double %phi3, double* %result
+  store double %sel_3_1.2, ptr %result
+  store double %phi3, ptr %result
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul.ll
index 7195ccf496793..44266f84379e1 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul.ll
@@ -179,7 +179,7 @@ entry:
 }
 
 declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
-define void @umul_with_overflow(i32 %lhs, i32 %rhs, i32* %pmul, i1* %pcarry_flag) {
+define void @umul_with_overflow(i32 %lhs, i32 %rhs, ptr %pmul, ptr %pcarry_flag) {
 ; MIPS32-LABEL: umul_with_overflow:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    multu $4, $5
@@ -195,7 +195,7 @@ define void @umul_with_overflow(i32 %lhs, i32 %rhs, i32* %pmul, i1* %pcarry_flag
   %res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %lhs, i32 %rhs)
   %carry_flag = extractvalue { i32, i1 } %res, 1
   %mul = extractvalue { i32, i1 } %res, 0
-  store i1 %carry_flag, i1* %pcarry_flag
-  store i32 %mul, i32* %pmul
+  store i1 %carry_flag, ptr %pcarry_flag
+  store i32 %mul, ptr %pmul
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec.ll
index a71e75958cddf..1e92d47e60a9d 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
-define void @mul_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @mul_v16i8(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: mul_v16i8:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w1, 0($4)
@@ -11,14 +11,14 @@ define void @mul_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %mul = mul <16 x i8> %1, %0
-  store <16 x i8> %mul, <16 x i8>* %c, align 16
+  store <16 x i8> %mul, ptr %c, align 16
   ret void
 }
 
-define void @mul_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @mul_v8i16(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: mul_v8i16:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w1, 0($4)
@@ -28,14 +28,14 @@ define void @mul_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %mul = mul <8 x i16> %1, %0
-  store <8 x i16> %mul, <8 x i16>* %c, align 16
+  store <8 x i16> %mul, ptr %c, align 16
   ret void
 }
 
-define void @mul_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @mul_v4i32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: mul_v4i32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w1, 0($4)
@@ -45,14 +45,14 @@ define void @mul_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %mul = mul <4 x i32> %1, %0
-  store <4 x i32> %mul, <4 x i32>* %c, align 16
+  store <4 x i32> %mul, ptr %c, align 16
   ret void
 }
 
-define void @mul_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @mul_v2i64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: mul_v2i64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w1, 0($4)
@@ -62,9 +62,9 @@ define void @mul_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %mul = mul <2 x i64> %1, %0
-  store <2 x i64> %mul, <2 x i64>* %c, align 16
+  store <2 x i64> %mul, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec_builtin.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec_builtin.ll
index 0e5be50320c0c..732ab42940418 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec_builtin.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/mul_vec_builtin.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
 declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>)
-define void @mul_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @mul_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: mul_v16i8_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -12,15 +12,15 @@ define void @mul_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %c, align 16
+  store <16 x i8> %2, ptr %c, align 16
   ret void
 }
 
 declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>)
-define void @mul_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @mul_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: mul_v8i16_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -30,15 +30,15 @@ define void @mul_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %c, align 16
+  store <8 x i16> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>)
-define void @mul_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @mul_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: mul_v4i32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -48,15 +48,15 @@ define void @mul_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %c, align 16
+  store <4 x i32> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>)
-define void @mul_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @mul_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: mul_v2i64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -66,9 +66,9 @@ define void @mul_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %c, align 16
+  store <2 x i64> %2, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll
index 100ea22d3cf29..ab711106e06f6 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll
@@ -222,7 +222,7 @@ cond.end:
   ret i64 %cond
 }
 
-define void @phi_ambiguous_i64_in_fpr(i1 %cnd, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {
+define void @phi_ambiguous_i64_in_fpr(i1 %cnd, ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) {
 ; MIPS32-LABEL: phi_ambiguous_i64_in_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -32
@@ -256,8 +256,8 @@ define void @phi_ambiguous_i64_in_fpr(i1 %cnd, i64* %i64_ptr_a, i64* %i64_ptr_b,
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i64, i64* %i64_ptr_a, align 8
-  %1 = load i64, i64* %i64_ptr_b, align 8
+  %0 = load i64, ptr %i64_ptr_a, align 8
+  %1 = load i64, ptr %i64_ptr_b, align 8
   br i1 %cnd, label %cond.true, label %cond.false
 
 cond.true:
@@ -268,7 +268,7 @@ cond.false:
 
 cond.end:
   %cond = phi i64 [ %0, %cond.true ], [ %1, %cond.false ]
-  store i64 %cond, i64* %i64_ptr_c, align 8
+  store i64 %cond, ptr %i64_ptr_c, align 8
   ret void
 }
 
@@ -315,7 +315,7 @@ cond.end:
   ret float %cond
 }
 
-define void @phi_ambiguous_float_in_gpr(i1 %cnd, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {
+define void @phi_ambiguous_float_in_gpr(i1 %cnd, ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) {
 ; MIPS32-LABEL: phi_ambiguous_float_in_gpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -16
@@ -349,8 +349,8 @@ define void @phi_ambiguous_float_in_gpr(i1 %cnd, float* %f32_ptr_a, float* %f32_
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load float, float* %f32_ptr_a, align 4
-  %1 = load float, float* %f32_ptr_b, align 4
+  %0 = load float, ptr %f32_ptr_a, align 4
+  %1 = load float, ptr %f32_ptr_b, align 4
   br i1 %cnd, label %cond.true, label %cond.false
 
 cond.true:
@@ -361,7 +361,7 @@ cond.false:
 
 cond.end:
   %cond = phi float [ %0, %cond.true ], [ %1, %cond.false ]
-  store float %cond, float* %f32_ptr_c, align 4
+  store float %cond, ptr %f32_ptr_c, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/pointers.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/pointers.ll
index b274167a5cb55..05c2c66a80e11 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/pointers.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/pointers.ll
@@ -1,18 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define i32 @ptr_arg_in_regs(i32* %p) {
+define i32 @ptr_arg_in_regs(ptr %p) {
 ; MIPS32-LABEL: ptr_arg_in_regs:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %p
+  %0 = load i32, ptr %p
   ret i32 %0
 }
 
-define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {
+define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {
 ; MIPS32-LABEL: ptr_arg_on_stack:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $1, $sp, 16
@@ -21,16 +21,16 @@ define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %p
+  %0 = load i32, ptr %p
   ret i32 %0
 }
 
-define i8* @ret_ptr(i8* %p) {
+define ptr @ret_ptr(ptr %p) {
 ; MIPS32-LABEL: ret_ptr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    move $2, $4
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  ret i8* %p
+  ret ptr %p
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec.ll
index 29b41b454b5a3..cf3f2a9549ea2 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
-define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @sdiv_v16i8(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sdiv_v16i8:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -11,14 +11,14 @@ define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %div = sdiv <16 x i8> %0, %1
-  store <16 x i8> %div, <16 x i8>* %c, align 16
+  store <16 x i8> %div, ptr %c, align 16
   ret void
 }
 
-define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @sdiv_v8i16(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sdiv_v8i16:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -28,14 +28,14 @@ define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %div = sdiv <8 x i16> %0, %1
-  store <8 x i16> %div, <8 x i16>* %c, align 16
+  store <8 x i16> %div, ptr %c, align 16
   ret void
 }
 
-define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @sdiv_v4i32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sdiv_v4i32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -45,14 +45,14 @@ define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %div = sdiv <4 x i32> %0, %1
-  store <4 x i32> %div, <4 x i32>* %c, align 16
+  store <4 x i32> %div, ptr %c, align 16
   ret void
 }
 
-define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @sdiv_v2i64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sdiv_v2i64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -62,14 +62,14 @@ define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %div = sdiv <2 x i64> %0, %1
-  store <2 x i64> %div, <2 x i64>* %c, align 16
+  store <2 x i64> %div, ptr %c, align 16
   ret void
 }
 
-define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @srem_v16i8(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: srem_v16i8:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -79,14 +79,14 @@ define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %rem = srem <16 x i8> %0, %1
-  store <16 x i8> %rem, <16 x i8>* %c, align 16
+  store <16 x i8> %rem, ptr %c, align 16
   ret void
 }
 
-define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @srem_v8i16(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: srem_v8i16:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -96,14 +96,14 @@ define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %rem = srem <8 x i16> %0, %1
-  store <8 x i16> %rem, <8 x i16>* %c, align 16
+  store <8 x i16> %rem, ptr %c, align 16
   ret void
 }
 
-define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @srem_v4i32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: srem_v4i32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -113,14 +113,14 @@ define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %rem = srem <4 x i32> %0, %1
-  store <4 x i32> %rem, <4 x i32>* %c, align 16
+  store <4 x i32> %rem, ptr %c, align 16
   ret void
 }
 
-define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @srem_v2i64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: srem_v2i64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -130,14 +130,14 @@ define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %rem = srem <2 x i64> %0, %1
-  store <2 x i64> %rem, <2 x i64>* %c, align 16
+  store <2 x i64> %rem, ptr %c, align 16
   ret void
 }
 
-define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @udiv_v16u8(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: udiv_v16u8:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -147,14 +147,14 @@ define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %div = udiv <16 x i8> %0, %1
-  store <16 x i8> %div, <16 x i8>* %c, align 16
+  store <16 x i8> %div, ptr %c, align 16
   ret void
 }
 
-define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @udiv_v8u16(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: udiv_v8u16:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -164,14 +164,14 @@ define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %div = udiv <8 x i16> %0, %1
-  store <8 x i16> %div, <8 x i16>* %c, align 16
+  store <8 x i16> %div, ptr %c, align 16
   ret void
 }
 
-define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @udiv_v4u32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: udiv_v4u32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -181,14 +181,14 @@ define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %div = udiv <4 x i32> %0, %1
-  store <4 x i32> %div, <4 x i32>* %c, align 16
+  store <4 x i32> %div, ptr %c, align 16
   ret void
 }
 
-define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @udiv_v2u64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: udiv_v2u64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -198,14 +198,14 @@ define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %div = udiv <2 x i64> %0, %1
-  store <2 x i64> %div, <2 x i64>* %c, align 16
+  store <2 x i64> %div, ptr %c, align 16
   ret void
 }
 
-define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @urem_v16u8(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: urem_v16u8:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -215,14 +215,14 @@ define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %rem = urem <16 x i8> %0, %1
-  store <16 x i8> %rem, <16 x i8>* %c, align 16
+  store <16 x i8> %rem, ptr %c, align 16
   ret void
 }
 
-define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @urem_v8u16(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: urem_v8u16:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -232,14 +232,14 @@ define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %rem = urem <8 x i16> %0, %1
-  store <8 x i16> %rem, <8 x i16>* %c, align 16
+  store <8 x i16> %rem, ptr %c, align 16
   ret void
 }
 
-define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @urem_v4u32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: urem_v4u32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -249,14 +249,14 @@ define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %rem = urem <4 x i32> %0, %1
-  store <4 x i32> %rem, <4 x i32>* %c, align 16
+  store <4 x i32> %rem, ptr %c, align 16
   ret void
 }
 
-define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @urem_v2u64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: urem_v2u64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -266,9 +266,9 @@ define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %rem = urem <2 x i64> %0, %1
-  store <2 x i64> %rem, <2 x i64>* %c, align 16
+  store <2 x i64> %rem, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec_builtin.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec_builtin.ll
index 8246971fe7594..509be404d4737 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec_builtin.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div_vec_builtin.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
 declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>)
-define void @sdiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @sdiv_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sdiv_v16i8_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -12,15 +12,15 @@ define void @sdiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %c, align 16
+  store <16 x i8> %2, ptr %c, align 16
   ret void
 }
 
 declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>)
-define void @sdiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @sdiv_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sdiv_v8i16_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -30,15 +30,15 @@ define void @sdiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %c, align 16
+  store <8 x i16> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>)
-define void @sdiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @sdiv_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sdiv_v4i32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -48,15 +48,15 @@ define void @sdiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %c, align 16
+  store <4 x i32> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>)
-define void @sdiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @sdiv_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sdiv_v2i64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -66,15 +66,15 @@ define void @sdiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %c, align 16
+  store <2 x i64> %2, ptr %c, align 16
   ret void
 }
 
 declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>)
-define void @smod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @smod_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: smod_v16i8_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -84,15 +84,15 @@ define void @smod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %c, align 16
+  store <16 x i8> %2, ptr %c, align 16
   ret void
 }
 
 declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>)
-define void @smod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @smod_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: smod_v8i16_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -102,15 +102,15 @@ define void @smod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %c, align 16
+  store <8 x i16> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>)
-define void @smod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @smod_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: smod_v4i32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -120,15 +120,15 @@ define void @smod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %c, align 16
+  store <4 x i32> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>)
-define void @smod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @smod_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: smod_v2i64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -138,15 +138,15 @@ define void @smod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %c, align 16
+  store <2 x i64> %2, ptr %c, align 16
   ret void
 }
 
 declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>)
-define void @udiv_v16u8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @udiv_v16u8_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: udiv_v16u8_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -156,15 +156,15 @@ define void @udiv_v16u8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %c, align 16
+  store <16 x i8> %2, ptr %c, align 16
   ret void
 }
 
 declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>)
-define void @udiv_v8u16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @udiv_v8u16_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: udiv_v8u16_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -174,15 +174,15 @@ define void @udiv_v8u16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %c, align 16
+  store <8 x i16> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>)
-define void @udiv_v4u32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @udiv_v4u32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: udiv_v4u32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -192,15 +192,15 @@ define void @udiv_v4u32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %c, align 16
+  store <4 x i32> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>)
-define void @udiv_v2u64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @udiv_v2u64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: udiv_v2u64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -210,15 +210,15 @@ define void @udiv_v2u64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %c, align 16
+  store <2 x i64> %2, ptr %c, align 16
   ret void
 }
 
 declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>)
-define void @umod_v16u8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @umod_v16u8_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: umod_v16u8_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -228,15 +228,15 @@ define void @umod_v16u8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %c, align 16
+  store <16 x i8> %2, ptr %c, align 16
   ret void
 }
 
 declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>)
-define void @umod_v8u16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @umod_v8u16_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: umod_v8u16_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -246,15 +246,15 @@ define void @umod_v8u16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %c, align 16
+  store <8 x i16> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>)
-define void @umod_v4u32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @umod_v4u32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: umod_v4u32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -264,15 +264,15 @@ define void @umod_v4u32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %c, align 16
+  store <4 x i32> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>)
-define void @umod_v2u64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @umod_v2u64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: umod_v2u64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -282,9 +282,9 @@ define void @umod_v2u64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %c, align 16
+  store <2 x i64> %2, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll
index 7cc62e272ccc4..30076a5ec7187 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll
@@ -40,7 +40,7 @@ entry:
   ret i32 %cond
 }
 
-define i32* @select_ptr(i1 %test, i32* %a, i32* %b) {
+define ptr @select_ptr(i1 %test, ptr %a, ptr %b) {
 ; MIPS32-LABEL: select_ptr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    move $2, $6
@@ -49,8 +49,8 @@ define i32* @select_ptr(i1 %test, i32* %a, i32* %b) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %cond = select i1 %test, i32* %a, i32* %b
-  ret i32* %cond
+  %cond = select i1 %test, ptr %a, ptr %b
+  ret ptr %cond
 }
 
 define i32 @select_with_negation(i32 %a, i32 %b, i32 %x, i32 %y) {
@@ -87,7 +87,7 @@ entry:
   ret i64 %cond
 }
 
-define void @select_ambiguous_i64_in_fpr(i1 %test, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {
+define void @select_ambiguous_i64_in_fpr(i1 %test, ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) {
 ; MIPS32-LABEL: select_ambiguous_i64_in_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ldc1 $f2, 0($5)
@@ -98,10 +98,10 @@ define void @select_ambiguous_i64_in_fpr(i1 %test, i64* %i64_ptr_a, i64* %i64_pt
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i64, i64* %i64_ptr_a, align 8
-  %1 = load i64, i64* %i64_ptr_b, align 8
+  %0 = load i64, ptr %i64_ptr_a, align 8
+  %1 = load i64, ptr %i64_ptr_b, align 8
   %cond = select i1 %test, i64 %0, i64 %1
-  store i64 %cond, i64* %i64_ptr_c, align 8
+  store i64 %cond, ptr %i64_ptr_c, align 8
   ret void
 }
 
@@ -119,7 +119,7 @@ entry:
   ret float %cond
 }
 
-define void @select_ambiguous_float_in_gpr(i1 %test, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {
+define void @select_ambiguous_float_in_gpr(i1 %test, ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) {
 ; MIPS32-LABEL: select_ambiguous_float_in_gpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($5)
@@ -130,10 +130,10 @@ define void @select_ambiguous_float_in_gpr(i1 %test, float* %f32_ptr_a, float* %
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load float, float* %f32_ptr_a, align 4
-  %1 = load float, float* %f32_ptr_b, align 4
+  %0 = load float, ptr %f32_ptr_a, align 4
+  %1 = load float, ptr %f32_ptr_b, align 4
   %cond = select i1 %test, float %0, float %1
-  store float %cond, float* %f32_ptr_c, align 4
+  store float %cond, ptr %f32_ptr_c, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sret_pointer.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sret_pointer.ll
index 2f22950340583..df3582f4b9bb7 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sret_pointer.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sret_pointer.ll
@@ -3,7 +3,7 @@
 
 %struct.S = type { i32, i32 }
 
-define void @ZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
+define void @ZeroInit(ptr noalias sret(%struct.S) %agg.result) {
 ; MIPS32-LABEL: ZeroInit:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ori $1, $zero, 0
@@ -12,14 +12,13 @@ define void @ZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %x = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 0
-  store i32 0, i32* %x, align 4
-  %y = getelementptr inbounds %struct.S, %struct.S* %agg.result, i32 0, i32 1
-  store i32 0, i32* %y, align 4
+  store i32 0, ptr %agg.result, align 4
+  %y = getelementptr inbounds %struct.S, ptr %agg.result, i32 0, i32 1
+  store i32 0, ptr %y, align 4
   ret void
 }
 
-define void @CallZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
+define void @CallZeroInit(ptr noalias sret(%struct.S) %agg.result) {
 ; MIPS32-LABEL: CallZeroInit:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -24
@@ -33,6 +32,6 @@ define void @CallZeroInit(%struct.S* noalias sret(%struct.S) %agg.result) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  call void @ZeroInit(%struct.S* sret(%struct.S) %agg.result)
+  call void @ZeroInit(ptr sret(%struct.S) %agg.result)
   ret void
 }
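The ZeroInit hunk above is one place where the conversion is more than a type substitution: with opaque pointers, a getelementptr whose indices are all zero produces its base pointer unchanged, so the %x GEP for field 0 folds away and the first store goes directly through %agg.result. The GEP for field 1 (%y) must stay, since it really advances the pointer by four bytes. In isolation (illustrative; %p stands in for %agg.result):

  ; before: field 0 addressed through an all-zero-index GEP
  %x = getelementptr inbounds %struct.S, ptr %p, i32 0, i32 0
  store i32 0, ptr %x, align 4

  ; after: a zero-index GEP of %p is just %p, so store through it directly
  store i32 0, ptr %p, align 4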

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store.ll
index be797473677a6..f2a7f8a947fbb 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store.ll
@@ -1,18 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define void @store_i32(i32 %val, i32* %ptr)  {
+define void @store_i32(i32 %val, ptr %ptr)  {
 ; MIPS32-LABEL: store_i32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sw $4, 0($5)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  store i32 %val, i32* %ptr
+  store i32 %val, ptr %ptr
   ret void
 }
 
-define void @store_i64(i64 %val, i64* %ptr)  {
+define void @store_i64(i64 %val, ptr %ptr)  {
 ; MIPS32-LABEL: store_i64:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sw $4, 0($6)
@@ -20,28 +20,28 @@ define void @store_i64(i64 %val, i64* %ptr)  {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  store i64 %val, i64* %ptr
+  store i64 %val, ptr %ptr
   ret void
 }
 
-define void @store_float(float %val, float* %ptr)  {
+define void @store_float(float %val, ptr %ptr)  {
 ; MIPS32-LABEL: store_float:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    swc1 $f12, 0($5)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  store float %val, float* %ptr
+  store float %val, ptr %ptr
   ret void
 }
 
-define void @store_double(double %val, double* %ptr)  {
+define void @store_double(double %val, ptr %ptr)  {
 ; MIPS32-LABEL: store_double:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sdc1 $f12, 0($6)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  store double %val, double* %ptr
+  store double %val, ptr %ptr
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_4_unaligned.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_4_unaligned.ll
index 256655a054694..0f6e400abcb40 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_4_unaligned.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_4_unaligned.ll
@@ -29,7 +29,7 @@ define void @store_float_align1(float %a) {
 ; MIPS32R6-NEXT:    swc1 $f12, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store float %a, float* @float_align1, align 1
+  store float %a, ptr @float_align1, align 1
   ret void
 }
 
@@ -51,7 +51,7 @@ define void @store_float_align2(float %a) {
 ; MIPS32R6-NEXT:    swc1 $f12, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store float %a, float* @float_align2, align 2
+  store float %a, ptr @float_align2, align 2
   ret void
 }
 
@@ -71,7 +71,7 @@ define void @store_float_align4(float %a) {
 ; MIPS32R6-NEXT:    swc1 $f12, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store float %a, float* @float_align4, align 4
+  store float %a, ptr @float_align4, align 4
   ret void
 }
 
@@ -91,7 +91,7 @@ define void @store_float_align8(float %a) {
 ; MIPS32R6-NEXT:    swc1 $f12, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store float %a, float* @float_align8, align 8
+  store float %a, ptr @float_align8, align 8
   ret void
 }
 
@@ -112,7 +112,7 @@ define void @store_i32_align1(i32 signext %a) {
 ; MIPS32R6-NEXT:    sw $4, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store i32 %a, i32* @i32_align1, align 1
+  store i32 %a, ptr @i32_align1, align 1
   ret void
 }
 
@@ -133,7 +133,7 @@ define void @store_i32_align2(i32 signext %a) {
 ; MIPS32R6-NEXT:    sw $4, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store i32 %a, i32* @i32_align2, align 2
+  store i32 %a, ptr @i32_align2, align 2
   ret void
 }
 
@@ -153,7 +153,7 @@ define void @store_i32_align4(i32 signext %a) {
 ; MIPS32R6-NEXT:    sw $4, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store i32 %a, i32* @i32_align4, align 4
+  store i32 %a, ptr @i32_align4, align 4
   ret void
 }
 
@@ -173,6 +173,6 @@ define void @store_i32_align8(i32 signext %a) {
 ; MIPS32R6-NEXT:    sw $4, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store i32 %a, i32* @i32_align8, align 8
+  store i32 %a, ptr @i32_align8, align 8
   ret void
 }
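The next file shows the other simplification that opaque pointers enable: pointer-to-pointer bitcasts become meaningless once every pointer has the same type, so the %struct.*-to-iN* casts are dropped and the truncated integer is stored straight through the incoming ptr argument. The shape of the change, condensed from the first hunk below:

  ; before: cast the struct pointer to a 24-bit integer pointer
  %0 = bitcast %struct.MemSize3_Align1* %S to i24*
  %1 = trunc i32 %a to i24
  store i24 %1, i24* %0, align 1

  ; after: no cast needed, store the i24 through ptr directly
  %0 = trunc i32 %a to i24
  store i24 %0, ptr %S, align 1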

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_split_because_of_memsize_or_align.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_split_because_of_memsize_or_align.ll
index 333b24a93684c..f15950a0bb08b 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_split_because_of_memsize_or_align.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/store_split_because_of_memsize_or_align.ll
@@ -28,7 +28,7 @@
 @i64_align4 = common global i64 0, align 4
 @i64_align8 = common global i64 0, align 8
 
-define void @store3align1(%struct.MemSize3_Align1* %S, i32 signext %a) {
+define void @store3align1(ptr %S, i32 signext %a) {
 ; MIPS32-LABEL: store3align1:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sb $5, 0($4)
@@ -46,13 +46,12 @@ define void @store3align1(%struct.MemSize3_Align1* %S, i32 signext %a) {
 ; MIPS32R6-NEXT:    sb $1, 2($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize3_Align1* %S to i24*
-  %1 = trunc i32 %a to i24
-  store i24 %1, i24* %0, align 1
+  %0 = trunc i32 %a to i24
+  store i24 %0, ptr %S, align 1
   ret void
 }
 
-define void @store3align2(%struct.MemSize3_Align2* %S, i32 signext %a) {
+define void @store3align2(ptr %S, i32 signext %a) {
 ; MIPS32-LABEL: store3align2:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sh $5, 0($4)
@@ -68,13 +67,12 @@ define void @store3align2(%struct.MemSize3_Align2* %S, i32 signext %a) {
 ; MIPS32R6-NEXT:    sb $1, 2($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize3_Align2* %S to i24*
-  %1 = trunc i32 %a to i24
-  store i24 %1, i24* %0, align 2
+  %0 = trunc i32 %a to i24
+  store i24 %0, ptr %S, align 2
   ret void
 }
 
-define void @store3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
+define void @store3align4(ptr %S, i32 signext %a) {
 ; MIPS32-LABEL: store3align4:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sh $5, 0($4)
@@ -90,13 +88,12 @@ define void @store3align4(%struct.MemSize3_Align4* %S, i32 signext %a) {
 ; MIPS32R6-NEXT:    sb $1, 2($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize3_Align4* %S to i24*
-  %1 = trunc i32 %a to i24
-  store i24 %1, i24* %0, align 4
+  %0 = trunc i32 %a to i24
+  store i24 %0, ptr %S, align 4
   ret void
 }
 
-define void @store3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
+define void @store3align8(ptr %S, i32 signext %a) {
 ; MIPS32-LABEL: store3align8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sh $5, 0($4)
@@ -112,13 +109,12 @@ define void @store3align8(%struct.MemSize3_Align8* %S, i32 signext %a) {
 ; MIPS32R6-NEXT:    sb $1, 2($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize3_Align8* %S to i24*
-  %1 = trunc i32 %a to i24
-  store i24 %1, i24* %0, align 8
+  %0 = trunc i32 %a to i24
+  store i24 %0, ptr %S, align 8
   ret void
 }
 
-define void @store5align1(%struct.MemSize5_Align1* %S, i64 %a) {
+define void @store5align1(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store5align1:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    swl $6, 3($4)
@@ -133,13 +129,12 @@ define void @store5align1(%struct.MemSize5_Align1* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sb $7, 4($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize5_Align1* %S to i40*
-  %1 = trunc i64 %a to i40
-  store i40 %1, i40* %0, align 1
+  %0 = trunc i64 %a to i40
+  store i40 %0, ptr %S, align 1
   ret void
 }
 
-define void @store5align2(%struct.MemSize5_Align2* %S, i64 %a) {
+define void @store5align2(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store5align2:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    swl $6, 3($4)
@@ -154,13 +149,12 @@ define void @store5align2(%struct.MemSize5_Align2* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sb $7, 4($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize5_Align2* %S to i40*
-  %1 = trunc i64 %a to i40
-  store i40 %1, i40* %0, align 2
+  %0 = trunc i64 %a to i40
+  store i40 %0, ptr %S, align 2
   ret void
 }
 
-define void @store5align4(%struct.MemSize5_Align4* %S, i64 %a) {
+define void @store5align4(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store5align4:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sw $6, 0($4)
@@ -174,13 +168,12 @@ define void @store5align4(%struct.MemSize5_Align4* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sb $7, 4($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize5_Align4* %S to i40*
-  %1 = trunc i64 %a to i40
-  store i40 %1, i40* %0, align 4
+  %0 = trunc i64 %a to i40
+  store i40 %0, ptr %S, align 4
   ret void
 }
 
-define void @store5align8(%struct.MemSize5_Align8* %S, i64 %a) {
+define void @store5align8(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store5align8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sw $6, 0($4)
@@ -194,13 +187,12 @@ define void @store5align8(%struct.MemSize5_Align8* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sb $7, 4($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize5_Align8* %S to i40*
-  %1 = trunc i64 %a to i40
-  store i40 %1, i40* %0, align 8
+  %0 = trunc i64 %a to i40
+  store i40 %0, ptr %S, align 8
   ret void
 }
 
-define void @store6align1(%struct.MemSize6_Align1* %S, i64 %a) {
+define void @store6align1(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store6align1:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ori $1, $zero, 4
@@ -219,13 +211,12 @@ define void @store6align1(%struct.MemSize6_Align1* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sh $7, 4($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize6_Align1* %S to i48*
-  %1 = trunc i64 %a to i48
-  store i48 %1, i48* %0, align 1
+  %0 = trunc i64 %a to i48
+  store i48 %0, ptr %S, align 1
   ret void
 }
 
-define void @store6align2(%struct.MemSize6_Align2* %S, i64 %a) {
+define void @store6align2(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store6align2:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    swl $6, 3($4)
@@ -240,13 +231,12 @@ define void @store6align2(%struct.MemSize6_Align2* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sh $7, 4($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize6_Align2* %S to i48*
-  %1 = trunc i64 %a to i48
-  store i48 %1, i48* %0, align 2
+  %0 = trunc i64 %a to i48
+  store i48 %0, ptr %S, align 2
   ret void
 }
 
-define void @store6align4(%struct.MemSize6_Align4* %S, i64 %a) {
+define void @store6align4(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store6align4:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sw $6, 0($4)
@@ -260,13 +250,12 @@ define void @store6align4(%struct.MemSize6_Align4* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sh $7, 4($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize6_Align4* %S to i48*
-  %1 = trunc i64 %a to i48
-  store i48 %1, i48* %0, align 4
+  %0 = trunc i64 %a to i48
+  store i48 %0, ptr %S, align 4
   ret void
 }
 
-define void @store6align8(%struct.MemSize6_Align8* %S, i64 %a) {
+define void @store6align8(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store6align8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    sw $6, 0($4)
@@ -280,13 +269,12 @@ define void @store6align8(%struct.MemSize6_Align8* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sh $7, 4($4)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize6_Align8* %S to i48*
-  %1 = trunc i64 %a to i48
-  store i48 %1, i48* %0, align 8
+  %0 = trunc i64 %a to i48
+  store i48 %0, ptr %S, align 8
   ret void
 }
 
-define void @store7align1(%struct.MemSize7_Align1* %S, i64 %a) {
+define void @store7align1(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store7align1:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ori $1, $zero, 4
@@ -311,13 +299,12 @@ define void @store7align1(%struct.MemSize7_Align1* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sb $1, 2($2)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize7_Align1* %S to i56*
-  %1 = trunc i64 %a to i56
-  store i56 %1, i56* %0, align 1
+  %0 = trunc i64 %a to i56
+  store i56 %0, ptr %S, align 1
   ret void
 }
 
-define void @store7align2(%struct.MemSize7_Align2* %S, i64 %a) {
+define void @store7align2(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store7align2:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ori $1, $zero, 4
@@ -340,13 +327,12 @@ define void @store7align2(%struct.MemSize7_Align2* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sb $1, 2($2)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize7_Align2* %S to i56*
-  %1 = trunc i64 %a to i56
-  store i56 %1, i56* %0, align 2
+  %0 = trunc i64 %a to i56
+  store i56 %0, ptr %S, align 2
   ret void
 }
 
-define void @store7align4(%struct.MemSize7_Align4* %S, i64 %a) {
+define void @store7align4(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store7align4:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ori $1, $zero, 4
@@ -368,13 +354,12 @@ define void @store7align4(%struct.MemSize7_Align4* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sb $1, 2($2)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize7_Align4* %S to i56*
-  %1 = trunc i64 %a to i56
-  store i56 %1, i56* %0, align 4
+  %0 = trunc i64 %a to i56
+  store i56 %0, ptr %S, align 4
   ret void
 }
 
-define void @store7align8(%struct.MemSize7_Align8* %S, i64 %a) {
+define void @store7align8(ptr %S, i64 %a) {
 ; MIPS32-LABEL: store7align8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    ori $1, $zero, 4
@@ -396,9 +381,8 @@ define void @store7align8(%struct.MemSize7_Align8* %S, i64 %a) {
 ; MIPS32R6-NEXT:    sb $1, 2($2)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  %0 = bitcast %struct.MemSize7_Align8* %S to i56*
-  %1 = trunc i64 %a to i56
-  store i56 %1, i56* %0, align 8
+  %0 = trunc i64 %a to i56
+  store i56 %0, ptr %S, align 8
   ret void
 }
 
@@ -423,7 +407,7 @@ define void @store_double_align1(double %a) {
 ; MIPS32R6-NEXT:    sdc1 $f12, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store double %a, double* @double_align1, align 1
+  store double %a, ptr @double_align1, align 1
   ret void
 }
 
@@ -448,7 +432,7 @@ define void @store_double_align2(double %a) {
 ; MIPS32R6-NEXT:    sdc1 $f12, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store double %a, double* @double_align2, align 2
+  store double %a, ptr @double_align2, align 2
   ret void
 }
 
@@ -471,7 +455,7 @@ define void @store_double_align4(double %a) {
 ; MIPS32R6-NEXT:    sdc1 $f12, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store double %a, double* @double_align4, align 4
+  store double %a, ptr @double_align4, align 4
   ret void
 }
 
@@ -491,7 +475,7 @@ define void @store_double_align8(double %a) {
 ; MIPS32R6-NEXT:    sdc1 $f12, 0($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store double %a, double* @double_align8, align 8
+  store double %a, ptr @double_align8, align 8
   ret void
 }
 
@@ -515,7 +499,7 @@ define void @store_i64_align1(i64 %a) {
 ; MIPS32R6-NEXT:    sw $5, 4($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store i64 %a, i64* @i64_align1, align 1
+  store i64 %a, ptr @i64_align1, align 1
   ret void
 }
 
@@ -539,7 +523,7 @@ define void @store_i64_align2(i64 signext %a) {
 ; MIPS32R6-NEXT:    sw $5, 4($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store i64 %a, i64* @i64_align2, align 2
+  store i64 %a, ptr @i64_align2, align 2
   ret void
 }
 
@@ -561,7 +545,7 @@ define void @store_i64_align4(i64 %a) {
 ; MIPS32R6-NEXT:    sw $5, 4($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store i64 %a, i64* @i64_align4, align 4
+  store i64 %a, ptr @i64_align4, align 4
   ret void
 }
 
@@ -583,6 +567,6 @@ define void @store_i64_align8(i64 signext %a) {
 ; MIPS32R6-NEXT:    sw $5, 4($1)
 ; MIPS32R6-NEXT:    jrc $ra
 entry:
-  store i64 %a, i64* @i64_align8, align 8
+  store i64 %a, ptr @i64_align8, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec.ll
index 8ce695f073629..cf7423456ffdc 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
-define void @sub_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @sub_v16i8(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sub_v16i8:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w1, 0($4)
@@ -11,14 +11,14 @@ define void @sub_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %sub = sub <16 x i8> %1, %0
-  store <16 x i8> %sub, <16 x i8>* %c, align 16
+  store <16 x i8> %sub, ptr %c, align 16
   ret void
 }
 
-define void @sub_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @sub_v8i16(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sub_v8i16:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w1, 0($4)
@@ -28,14 +28,14 @@ define void @sub_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %sub = sub <8 x i16> %1, %0
-  store <8 x i16> %sub, <8 x i16>* %c, align 16
+  store <8 x i16> %sub, ptr %c, align 16
   ret void
 }
 
-define void @sub_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @sub_v4i32(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sub_v4i32:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w1, 0($4)
@@ -45,14 +45,14 @@ define void @sub_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %sub = sub <4 x i32> %1, %0
-  store <4 x i32> %sub, <4 x i32>* %c, align 16
+  store <4 x i32> %sub, ptr %c, align 16
   ret void
 }
 
-define void @sub_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @sub_v2i64(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sub_v2i64:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w1, 0($4)
@@ -62,9 +62,9 @@ define void @sub_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %sub = sub <2 x i64> %1, %0
-  store <2 x i64> %sub, <2 x i64>* %c, align 16
+  store <2 x i64> %sub, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec_builtin.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec_builtin.ll
index 5e7952146b236..78c6a3a9fe2d9 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec_builtin.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sub_vec_builtin.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=P5600
 
 declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>)
-define void @sub_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
+define void @sub_v16i8_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sub_v16i8_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -12,15 +12,15 @@ define void @sub_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
-  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
+  %1 = load <16 x i8>, ptr %b, align 16
   %2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %c, align 16
+  store <16 x i8> %2, ptr %c, align 16
   ret void
 }
 
 declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>)
-define void @sub_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
+define void @sub_v8i16_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sub_v8i16_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -30,15 +30,15 @@ define void @sub_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
-  %1 = load <8 x i16>, <8 x i16>* %b, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
+  %1 = load <8 x i16>, ptr %b, align 16
   %2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %c, align 16
+  store <8 x i16> %2, ptr %c, align 16
   ret void
 }
 
 declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>)
-define void @sub_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
+define void @sub_v4i32_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sub_v4i32_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -48,15 +48,15 @@ define void @sub_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
-  %1 = load <4 x i32>, <4 x i32>* %b, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
+  %1 = load <4 x i32>, ptr %b, align 16
   %2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %c, align 16
+  store <4 x i32> %2, ptr %c, align 16
   ret void
 }
 
 declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>)
-define void @sub_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
+define void @sub_v2i64_builtin(ptr %a, ptr %b, ptr %c) {
 ; P5600-LABEL: sub_v2i64_builtin:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -66,15 +66,15 @@ define void @sub_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
-  %1 = load <2 x i64>, <2 x i64>* %b, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
+  %1 = load <2 x i64>, ptr %b, align 16
   %2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %c, align 16
+  store <2 x i64> %2, ptr %c, align 16
   ret void
 }
 
 declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32 immarg)
-define void @sub_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) {
+define void @sub_v16i8_builtin_imm(ptr %a, ptr %c) {
 ; P5600-LABEL: sub_v16i8_builtin_imm:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.b $w0, 0($4)
@@ -83,14 +83,14 @@ define void @sub_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %0 = load <16 x i8>, ptr %a, align 16
   %1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 3)
-  store <16 x i8> %1, <16 x i8>* %c, align 16
+  store <16 x i8> %1, ptr %c, align 16
   ret void
 }
 
 declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32 immarg)
-define void @sub_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) {
+define void @sub_v8i16_builtin_imm(ptr %a, ptr %c) {
 ; P5600-LABEL: sub_v8i16_builtin_imm:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.h $w0, 0($4)
@@ -99,14 +99,14 @@ define void @sub_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %0 = load <8 x i16>, ptr %a, align 16
   %1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 18)
-  store <8 x i16> %1, <8 x i16>* %c, align 16
+  store <8 x i16> %1, ptr %c, align 16
   ret void
 }
 
 declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32 immarg)
-define void @sub_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) {
+define void @sub_v4i32_builtin_imm(ptr %a, ptr %c) {
 ; P5600-LABEL: sub_v4i32_builtin_imm:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.w $w0, 0($4)
@@ -115,14 +115,14 @@ define void @sub_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %0 = load <4 x i32>, ptr %a, align 16
   %1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 25)
-  store <4 x i32> %1, <4 x i32>* %c, align 16
+  store <4 x i32> %1, ptr %c, align 16
   ret void
 }
 
 declare <2 x i64> @llvm.mips.subvi.d(<2 x i64>, i32 immarg)
-define void @sub_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) {
+define void @sub_v2i64_builtin_imm(ptr %a, ptr %c) {
 ; P5600-LABEL: sub_v2i64_builtin_imm:
 ; P5600:       # %bb.0: # %entry
 ; P5600-NEXT:    ld.d $w0, 0($4)
@@ -131,8 +131,8 @@ define void @sub_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) {
 ; P5600-NEXT:    jr $ra
 ; P5600-NEXT:    nop
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %0 = load <2 x i64>, ptr %a, align 16
   %1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 31)
-  store <2 x i64> %1, <2 x i64>* %c, align 16
+  store <2 x i64> %1, ptr %c, align 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll
index d81e3edf8dd30..c14caf68c0201 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll
@@ -1,29 +1,29 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define i32 @outgoing_gpr(i32* %i32_ptr) {
+define i32 @outgoing_gpr(ptr %i32_ptr) {
 ; MIPS32-LABEL: outgoing_gpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %i32_ptr
+  %0 = load i32, ptr %i32_ptr
   ret i32 %0
 }
 
-define float @outgoing_fpr(float* %float_ptr) {
+define float @outgoing_fpr(ptr %float_ptr) {
 ; MIPS32-LABEL: outgoing_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lwc1 $f0, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load float, float* %float_ptr
+  %0 = load float, ptr %float_ptr
   ret float %0
 }
 
-define i32 @outgoing_gpr_instr(i32* %i32_ptr1, i32* %i32_ptr2) {
+define i32 @outgoing_gpr_instr(ptr %i32_ptr1, ptr %i32_ptr2) {
 ; MIPS32-LABEL: outgoing_gpr_instr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -32,13 +32,13 @@ define i32 @outgoing_gpr_instr(i32* %i32_ptr1, i32* %i32_ptr2) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %i32_ptr1
-  %1 = load i32, i32* %i32_ptr2
+  %0 = load i32, ptr %i32_ptr1
+  %1 = load i32, ptr %i32_ptr2
   %outgoing_instr = add i32 %1, %0
   ret i32 %outgoing_instr
 }
 
-define float @outgoing_fpr_instr(float* %float_ptr1, float* %float_ptr2) {
+define float @outgoing_fpr_instr(ptr %float_ptr1, ptr %float_ptr2) {
 ; MIPS32-LABEL: outgoing_fpr_instr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lwc1 $f0, 0($4)
@@ -47,13 +47,13 @@ define float @outgoing_fpr_instr(float* %float_ptr1, float* %float_ptr2) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load float, float* %float_ptr1
-  %1 = load float, float* %float_ptr2
+  %0 = load float, ptr %float_ptr1
+  %1 = load float, ptr %float_ptr2
   %outgoing_instr = fadd float %0, %1
   ret float %outgoing_instr
 }
 
-define i32 @incoming_gpr(i32 %incoming_phys_reg, i1 %test, i32* %a) {
+define i32 @incoming_gpr(i32 %incoming_phys_reg, i1 %test, ptr %a) {
 ; MIPS32-LABEL: incoming_gpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    move $2, $4
@@ -63,12 +63,12 @@ define i32 @incoming_gpr(i32 %incoming_phys_reg, i1 %test, i32* %a) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %a
+  %0 = load i32, ptr %a
   %cond = select i1 %test, i32 %0, i32 %incoming_phys_reg
   ret i32 %cond
 }
 
-define float @incoming_fpr(float %incoming_phys_reg, i1 %test, float* %a) {
+define float @incoming_fpr(float %incoming_phys_reg, i1 %test, ptr %a) {
 ; MIPS32-LABEL: incoming_fpr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    mov.s $f0, $f12
@@ -78,13 +78,13 @@ define float @incoming_fpr(float %incoming_phys_reg, i1 %test, float* %a) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load float, float* %a
+  %0 = load float, ptr %a
   %cond = select i1 %test, float %0, float %incoming_phys_reg
   ret float %cond
 }
 
 
-define i32 @incoming_i32_instr(i32 %val1, i32 %val2, i32* %i32_ptr, i1 %test) {
+define i32 @incoming_i32_instr(i32 %val1, i32 %val2, ptr %i32_ptr, i1 %test) {
 ; MIPS32-LABEL: incoming_i32_instr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $1, 0($6)
@@ -94,13 +94,13 @@ define i32 @incoming_i32_instr(i32 %val1, i32 %val2, i32* %i32_ptr, i1 %test) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %i32_ptr
+  %0 = load i32, ptr %i32_ptr
   %incoming_instr = add i32 %val2, %val1
   %cond = select i1 %test, i32 %0, i32 %incoming_instr
   ret i32 %cond
 }
 
-define float @incoming_float_instr(float %val1, float %val2, float* %float_ptr, i1 %test) {
+define float @incoming_float_instr(float %val1, float %val2, ptr %float_ptr, i1 %test) {
 ; MIPS32-LABEL: incoming_float_instr:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lwc1 $f1, 0($6)
@@ -110,7 +110,7 @@ define float @incoming_float_instr(float %val1, float %val2, float* %float_ptr,
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load float, float* %float_ptr
+  %0 = load float, ptr %float_ptr
   %incoming_instr = fadd float %val2, %val1
   %cond = select i1 %test, float %0, float %incoming_instr
   ret float %cond

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/truncStore_and_aExtLoad.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/truncStore_and_aExtLoad.ll
index 1c085685f0150..05d6ab3944ea5 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/truncStore_and_aExtLoad.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/truncStore_and_aExtLoad.ll
@@ -1,29 +1,29 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define i8 @load1_s8_to_load1_s32(i8* %px) {
+define i8 @load1_s8_to_load1_s32(ptr %px) {
 ; MIPS32-LABEL: load1_s8_to_load1_s32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lbu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i8, i8* %px
+  %0 = load i8, ptr %px
   ret i8 %0
 }
 
-define i16 @load2_s16_to_load2_s32(i16* %px) {
+define i16 @load2_s16_to_load2_s32(ptr %px) {
 ; MIPS32-LABEL: load2_s16_to_load2_s32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lhu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i16, i16* %px
+  %0 = load i16, ptr %px
   ret i16 %0
 }
 
-define void @load_store_i1(i1* %px, i1* %py) {
+define void @load_store_i1(ptr %px, ptr %py) {
 ; MIPS32-LABEL: load_store_i1:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lbu $1, 0($5)
@@ -33,12 +33,12 @@ define void @load_store_i1(i1* %px, i1* %py) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i1, i1* %py
-  store i1 %0, i1* %px
+  %0 = load i1, ptr %py
+  store i1 %0, ptr %px
   ret void
 }
 
-define void @load_store_i8(i8* %px, i8* %py) {
+define void @load_store_i8(ptr %px, ptr %py) {
 ; MIPS32-LABEL: load_store_i8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lbu $1, 0($5)
@@ -46,12 +46,12 @@ define void @load_store_i8(i8* %px, i8* %py) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i8, i8* %py
-  store i8 %0, i8* %px
+  %0 = load i8, ptr %py
+  store i8 %0, ptr %px
   ret void
 }
 
-define void @load_store_i16(i16* %px, i16* %py) {
+define void @load_store_i16(ptr %px, ptr %py) {
 ; MIPS32-LABEL: load_store_i16:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lhu $1, 0($5)
@@ -59,12 +59,12 @@ define void @load_store_i16(i16* %px, i16* %py) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i16, i16* %py
-  store i16 %0, i16* %px
+  %0 = load i16, ptr %py
+  store i16 %0, ptr %px
   ret void
 }
 
-define void @load_store_i32(i32* %px, i32* %py) {
+define void @load_store_i32(ptr %px, ptr %py) {
 ; MIPS32-LABEL: load_store_i32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $1, 0($5)
@@ -72,7 +72,7 @@ define void @load_store_i32(i32* %px, i32* %py) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %py
-  store i32 %0, i32* %px
+  %0 = load i32, ptr %py
+  store i32 %0, ptr %px
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/var_arg.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/var_arg.ll
index fa6bf93d45d7e..80ff52241c16d 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/var_arg.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/var_arg.ll
@@ -2,11 +2,11 @@
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
 @.str = private unnamed_addr constant [11 x i8] c"string %s\0A\00", align 1
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare i32 @printf(i8*, ...)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare i32 @printf(ptr, ...)
 
-define void @testVaCopyArg(i8* %fmt, ...) {
+define void @testVaCopyArg(ptr %fmt, ...) {
 ; MIPS32-LABEL: testVaCopyArg:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $sp, $sp, -40
@@ -45,23 +45,19 @@ define void @testVaCopyArg(i8* %fmt, ...) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %fmt.addr = alloca i8*, align 4
-  %ap = alloca i8*, align 4
-  %aq = alloca i8*, align 4
-  %s = alloca i8*, align 4
-  store i8* %fmt, i8** %fmt.addr, align 4
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = bitcast i8** %aq to i8*
-  %1 = bitcast i8** %ap to i8*
-  call void @llvm.va_copy(i8* %0, i8* %1)
-  %argp.cur = load i8*, i8** %aq, align 4
-  %argp.next = getelementptr inbounds i8, i8* %argp.cur, i32 4
-  store i8* %argp.next, i8** %aq, align 4
-  %2 = bitcast i8* %argp.cur to i8**
-  %3 = load i8*, i8** %2, align 4
-  store i8* %3, i8** %s, align 4
-  %4 = load i8*, i8** %s, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i8* %4)
+  %fmt.addr = alloca ptr, align 4
+  %ap = alloca ptr, align 4
+  %aq = alloca ptr, align 4
+  %s = alloca ptr, align 4
+  store ptr %fmt, ptr %fmt.addr, align 4
+  call void @llvm.va_start(ptr %ap)
+  call void @llvm.va_copy(ptr %aq, ptr %ap)
+  %argp.cur = load ptr, ptr %aq, align 4
+  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
+  store ptr %argp.next, ptr %aq, align 4
+  %0 = load ptr, ptr %argp.cur, align 4
+  store ptr %0, ptr %s, align 4
+  %1 = load ptr, ptr %s, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, ptr %1)
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/zextLoad_and_sextLoad.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/zextLoad_and_sextLoad.ll
index e42e0fe3a6b21..44d351fd194c9 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/zextLoad_and_sextLoad.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/zextLoad_and_sextLoad.ll
@@ -1,55 +1,55 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
 
-define i32 @load1_s8_to_zextLoad1_s32(i8* %px) {
+define i32 @load1_s8_to_zextLoad1_s32(ptr %px) {
 ; MIPS32-LABEL: load1_s8_to_zextLoad1_s32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lbu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i8, i8* %px
+  %0 = load i8, ptr %px
   %conv = zext i8 %0 to i32
   ret i32 %conv
 }
 
-define i32 @load2_s16_to_zextLoad2_s32(i16* %px) {
+define i32 @load2_s16_to_zextLoad2_s32(ptr %px) {
 ; MIPS32-LABEL: load2_s16_to_zextLoad2_s32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lhu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i16, i16* %px
+  %0 = load i16, ptr %px
   %conv = zext i16 %0 to i32
   ret i32 %conv
 }
 
-define i16 @load1_s8_to_zextLoad1_s16(i8* %px) {
+define i16 @load1_s8_to_zextLoad1_s16(ptr %px) {
 ; MIPS32-LABEL: load1_s8_to_zextLoad1_s16:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lbu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i8, i8* %px
+  %0 = load i8, ptr %px
   %conv = zext i8 %0 to i16
   ret i16 %conv
 }
 
-define zeroext i16 @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(i8* %px) {
+define zeroext i16 @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(ptr %px) {
 ; MIPS32-LABEL: load1_s8_to_zextLoad1_s16_to_zextLoad1_s32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lbu $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i8, i8* %px
+  %0 = load i8, ptr %px
   %conv = zext i8 %0 to i16
   ret i16 %conv
 }
 
-define i64 @load4_s32_to_zextLoad4_s64(i32* %px) {
+define i64 @load4_s32_to_zextLoad4_s64(ptr %px) {
 ; MIPS32-LABEL: load4_s32_to_zextLoad4_s64:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -57,60 +57,60 @@ define i64 @load4_s32_to_zextLoad4_s64(i32* %px) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %px
+  %0 = load i32, ptr %px
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
-define i32 @load1_s8_to_sextLoad1_s32(i8* %px) {
+define i32 @load1_s8_to_sextLoad1_s32(ptr %px) {
 ; MIPS32-LABEL: load1_s8_to_sextLoad1_s32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lb $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i8, i8* %px
+  %0 = load i8, ptr %px
   %conv = sext i8 %0 to i32
   ret i32 %conv
 }
 
-define i32 @load2_s16_to_sextLoad2_s32(i16* %px) {
+define i32 @load2_s16_to_sextLoad2_s32(ptr %px) {
 ; MIPS32-LABEL: load2_s16_to_sextLoad2_s32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lh $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i16, i16* %px
+  %0 = load i16, ptr %px
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
 
-define i16 @load1_s8_to_sextLoad1_s16(i8* %px) {
+define i16 @load1_s8_to_sextLoad1_s16(ptr %px) {
 ; MIPS32-LABEL: load1_s8_to_sextLoad1_s16:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lb $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i8, i8* %px
+  %0 = load i8, ptr %px
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
 
-define signext i16 @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(i8* %px) {
+define signext i16 @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(ptr %px) {
 ; MIPS32-LABEL: load1_s8_to_sextLoad1_s16_to_sextLoad1_s32:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lb $2, 0($4)
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i8, i8* %px
+  %0 = load i8, ptr %px
   %conv = sext i8 %0 to i16
   ret i16 %conv
 }
 
-define i64 @load4_s32_to_sextLoad4_s64(i32* %px) {
+define i64 @load4_s32_to_sextLoad4_s64(ptr %px) {
 ; MIPS32-LABEL: load4_s32_to_sextLoad4_s64:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    lw $2, 0($4)
@@ -118,7 +118,7 @@ define i64 @load4_s32_to_sextLoad4_s64(i32* %px) {
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
 entry:
-  %0 = load i32, i32* %px
+  %0 = load i32, ptr %px
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }

diff --git a/llvm/test/CodeGen/Mips/addc.ll b/llvm/test/CodeGen/Mips/addc.ll
index e5d05b1d6dbb8..6877b3b633ce0 100644
--- a/llvm/test/CodeGen/Mips/addc.ll
+++ b/llvm/test/CodeGen/Mips/addc.ll
@@ -1,13 +1,13 @@
 ; RUN: llc  < %s -march=mipsel | FileCheck %s 
 ; RUN: llc  < %s -march=mips   | FileCheck %s
 
-define void @f(i64 %l, i64* nocapture %p) nounwind {
+define void @f(i64 %l, ptr nocapture %p) nounwind {
 entry:
 ; CHECK: lui  
 ; CHECK: ori
 ; CHECK: addu  
   %add = add i64 %l, 1311768467294899695
-  store i64 %add, i64* %p, align 4 
+  store i64 %add, ptr %p, align 4 
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/addi.ll b/llvm/test/CodeGen/Mips/addi.ll
index 2d34ec5b00302..40a71698d5daf 100644
--- a/llvm/test/CodeGen/Mips/addi.ll
+++ b/llvm/test/CodeGen/Mips/addi.ll
@@ -8,18 +8,18 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %add = add nsw i32 %0, 5
-  store i32 %add, i32* @i, align 4
-  %1 = load i32, i32* @j, align 4
+  store i32 %add, ptr @i, align 4
+  %1 = load i32, ptr @j, align 4
   %sub = sub nsw i32 %1, 5
-  store i32 %sub, i32* @j, align 4
-  %2 = load i32, i32* @k, align 4
+  store i32 %sub, ptr @j, align 4
+  %2 = load i32, ptr @k, align 4
   %add1 = add nsw i32 %2, 10000
-  store i32 %add1, i32* @k, align 4
-  %3 = load i32, i32* @l, align 4
+  store i32 %add1, ptr @k, align 4
+  %3 = load i32, ptr @l, align 4
   %sub2 = sub nsw i32 %3, 10000
-  store i32 %sub2, i32* @l, align 4
+  store i32 %sub2, ptr @l, align 4
 ; 16: 	addiu	${{[0-9]+}}, 5	# 16 bit inst
 ; 16: 	addiu	${{[0-9]+}}, -5	# 16 bit inst
 ; 16: 	addiu	${{[0-9]+}}, 10000

diff --git a/llvm/test/CodeGen/Mips/address-selection.ll b/llvm/test/CodeGen/Mips/address-selection.ll
index 5a1a97fdb3528..d7b3fc66813c6 100644
--- a/llvm/test/CodeGen/Mips/address-selection.ll
+++ b/llvm/test/CodeGen/Mips/address-selection.ll
@@ -12,29 +12,29 @@
 
 @x = global i32 0
 @a = global i32 1
-declare i32 @y(i32*, i32)
+declare i32 @y(ptr, i32)
 
 define i32 @z() {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = call i32 @y(i32 * @x, i32 %0)
+  %0 = load i32, ptr @a, align 4
+  %1 = call i32 @y(ptr @x, i32 %0)
   ret i32 %1
 }
 
 ; MIPS-LABEL: ===== Instruction selection ends:
-; MIPS: t[[A:[0-9]+]]: i32 = LUi TargetGlobalAddress:i32<i32* @x> 0 [TF=4]
-; MIPS: t{{.*}}: i32 = ADDiu t[[A]], TargetGlobalAddress:i32<i32* @x> 0 [TF=5]
+; MIPS: t[[A:[0-9]+]]: i32 = LUi TargetGlobalAddress:i32<ptr @x> 0 [TF=4]
+; MIPS: t{{.*}}: i32 = ADDiu t[[A]], TargetGlobalAddress:i32<ptr @x> 0 [TF=5]
 
 ; MIPS-XGOT-LABEL: ===== Instruction selection ends:
-; MIPS-XGOT: t[[B:[0-9]+]]: i32 = LUi TargetGlobalAddress:i32<i32* @x> 0 [TF=20]
+; MIPS-XGOT: t[[B:[0-9]+]]: i32 = LUi TargetGlobalAddress:i32<ptr @x> 0 [TF=20]
 ; MIPS-XGOT: t[[C:[0-9]+]]: i32 = ADDu t[[B]], Register:i32 %0
-; MIPS-XGOT: t{{.*}}: i32,ch = LW<Mem:(load (s32) from got)> t[[C]], TargetGlobalAddress:i32<i32* @x> 0 [TF=21], t{{.*}}
+; MIPS-XGOT: t{{.*}}: i32,ch = LW<Mem:(load (s32) from got)> t[[C]], TargetGlobalAddress:i32<ptr @x> 0 [TF=21], t{{.*}}
 
 ; MM-LABEL: ===== Instruction selection ends:
-; MM: t[[A:[0-9]+]]: i32 = LUi_MM TargetGlobalAddress:i32<i32* @x> 0 [TF=4]
-; MM: t{{.*}}: i32 = ADDiu_MM t[[A]], TargetGlobalAddress:i32<i32* @x> 0 [TF=5]
+; MM: t[[A:[0-9]+]]: i32 = LUi_MM TargetGlobalAddress:i32<ptr @x> 0 [TF=4]
+; MM: t{{.*}}: i32 = ADDiu_MM t[[A]], TargetGlobalAddress:i32<ptr @x> 0 [TF=5]
 
 ; MM-XGOT-LABEL: ===== Instruction selection ends:
-; MM-XGOT: t[[B:[0-9]+]]: i32 = LUi_MM TargetGlobalAddress:i32<i32* @x> 0 [TF=20]
+; MM-XGOT: t[[B:[0-9]+]]: i32 = LUi_MM TargetGlobalAddress:i32<ptr @x> 0 [TF=20]
 ; MM-XGOT: t[[C:[0-9]+]]: i32 = ADDU16_MM t[[B]], Register:i32 %0
-; MM-XGOT: t{{.*}}: i32,ch = LW_MM<Mem:(load (s32) from got)> t[[C]], TargetGlobalAddress:i32<i32* @x> 0 [TF=21], t0
+; MM-XGOT: t{{.*}}: i32,ch = LW_MM<Mem:(load (s32) from got)> t[[C]], TargetGlobalAddress:i32<ptr @x> 0 [TF=21], t0

diff --git a/llvm/test/CodeGen/Mips/addressing-mode.ll b/llvm/test/CodeGen/Mips/addressing-mode.ll
index 81e062062ecf6..bd8daf45be2c4 100644
--- a/llvm/test/CodeGen/Mips/addressing-mode.ll
+++ b/llvm/test/CodeGen/Mips/addressing-mode.ll
@@ -8,7 +8,7 @@
 ; CHECK:      $BB0_2:
 ; CHECK-NOT:  sll ${{[0-9]+}}, ${{[0-9]+}}, 2
 
-define i32 @f0(i32 %n, i32 %m, [256 x i32]* nocapture %a, [256 x i32]* nocapture %b) nounwind readonly {
+define i32 @f0(i32 %n, i32 %m, ptr nocapture %a, ptr nocapture %b) nounwind readonly {
 entry:
   br label %for.cond1.preheader
 
@@ -20,10 +20,10 @@ for.cond1.preheader:
 for.body3:
   %s.120 = phi i32 [ %s.022, %for.cond1.preheader ], [ %add7, %for.body3 ]
   %j.019 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ]
-  %arrayidx4 = getelementptr inbounds [256 x i32], [256 x i32]* %a, i32 %i.021, i32 %j.019
-  %0 = load i32, i32* %arrayidx4, align 4
-  %arrayidx6 = getelementptr inbounds [256 x i32], [256 x i32]* %b, i32 %i.021, i32 %j.019
-  %1 = load i32, i32* %arrayidx6, align 4
+  %arrayidx4 = getelementptr inbounds [256 x i32], ptr %a, i32 %i.021, i32 %j.019
+  %0 = load i32, ptr %arrayidx4, align 4
+  %arrayidx6 = getelementptr inbounds [256 x i32], ptr %b, i32 %i.021, i32 %j.019
+  %1 = load i32, ptr %arrayidx6, align 4
   %add = add i32 %0, %s.120
   %add7 = add i32 %add, %1
   %add8 = add nsw i32 %j.019, %m

diff --git a/llvm/test/CodeGen/Mips/adjust-callstack-sp.ll b/llvm/test/CodeGen/Mips/adjust-callstack-sp.ll
index 32d77ac19ae6b..c583ff0fdd6cf 100644
--- a/llvm/test/CodeGen/Mips/adjust-callstack-sp.ll
+++ b/llvm/test/CodeGen/Mips/adjust-callstack-sp.ll
@@ -6,7 +6,7 @@
 ; RUN: llc < %s -march=mips -mcpu=mips64 -target-abi n64 | FileCheck %s -check-prefix=GP64
 ; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 | FileCheck %s -check-prefix=GP64
 
-declare void @bar(i32*)
+declare void @bar(ptr)
 
 define void @foo(i32 %sz) {
   ; ALL-LABEL: foo:
@@ -15,6 +15,6 @@ define void @foo(i32 %sz) {
   ; GP32-NOT:       addiu     $sp, $sp, 0
   ; GP64-NOT:       daddiu    $sp, $sp, 0
   %a = alloca i32, i32 %sz
-  call void @bar(i32* %a)
+  call void @bar(ptr %a)
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/align16.ll b/llvm/test/CodeGen/Mips/align16.ll
index 7772812a3be69..a264272a6b818 100644
--- a/llvm/test/CodeGen/Mips/align16.ll
+++ b/llvm/test/CodeGen/Mips/align16.ll
@@ -3,7 +3,7 @@
 @i = global i32 25, align 4
 @.str = private unnamed_addr constant [5 x i8] c"%i \0A\00", align 1
 
-define void @p(i32* %i) nounwind {
+define void @p(ptr %i) nounwind {
 entry:
   ret void
 }
@@ -15,14 +15,14 @@ entry:
   %x = alloca i32, align 8
   %zz = alloca i32, align 4
   %z = alloca i32, align 4
-  %0 = load i32, i32* @i, align 4
-  %arrayidx = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
-  store i32 %0, i32* %arrayidx, align 4
-  %1 = load i32, i32* @i, align 4
-  store i32 %1, i32* %x, align 8
-  call void @p(i32* %x)
-  %arrayidx1 = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
-  call void @p(i32* %arrayidx1)
+  %0 = load i32, ptr @i, align 4
+  %arrayidx = getelementptr inbounds [512 x i32], ptr %y, i32 0, i32 10
+  store i32 %0, ptr %arrayidx, align 4
+  %1 = load i32, ptr @i, align 4
+  store i32 %1, ptr %x, align 8
+  call void @p(ptr %x)
+  %arrayidx1 = getelementptr inbounds [512 x i32], ptr %y, i32 0, i32 10
+  call void @p(ptr %arrayidx1)
   ret void
 }
 ; 16:	save	$ra, 2040

diff --git a/llvm/test/CodeGen/Mips/alloca.ll b/llvm/test/CodeGen/Mips/alloca.ll
index b708ddb134ce4..1fd6d859954ad 100644
--- a/llvm/test/CodeGen/Mips/alloca.ll
+++ b/llvm/test/CodeGen/Mips/alloca.ll
@@ -9,19 +9,19 @@ entry:
 ; CHECK: move  $4, $[[T0]]
 ; CHECK: move  $4, $[[T2]]
   %tmp1 = alloca i8, i32 %size, align 4
-  %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 5
-  store i8 97, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %tmp1, i32 5
+  store i8 97, ptr %add.ptr, align 1
   %tmp4 = alloca i8, i32 %size, align 4
   call void @foo2(double 1.000000e+00, double 2.000000e+00, i32 3) nounwind
-  %call = call i32 @foo(i8* %tmp1) nounwind
-  %call7 = call i32 @foo(i8* %tmp4) nounwind
+  %call = call i32 @foo(ptr %tmp1) nounwind
+  %call7 = call i32 @foo(ptr %tmp4) nounwind
   %add = add nsw i32 %call7, %call
   ret i32 %add
 }
 
 declare void @foo2(double, double, i32)
 
-declare i32 @foo(i8*)
+declare i32 @foo(ptr)
 
 @.str = private unnamed_addr constant [22 x i8] c"%d %d %d %d %d %d %d\0A\00", align 1
 
@@ -32,54 +32,45 @@ entry:
 ; CHECK: move  $sp, $[[T0]]
 
   %tmp1 = alloca i8, i32 %size, align 4
-  %0 = bitcast i8* %tmp1 to i32*
   %cmp = icmp sgt i32 %size, 10
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
 ; CHECK: addiu $4, $[[T0]], 40
 
-  %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 40
-  %1 = bitcast i8* %add.ptr to i32*
-  call void @foo3(i32* %1) nounwind
-  %arrayidx15.pre = getelementptr inbounds i8, i8* %tmp1, i32 12
-  %.pre = bitcast i8* %arrayidx15.pre to i32*
+  %add.ptr = getelementptr inbounds i8, ptr %tmp1, i32 40
+  call void @foo3(ptr %add.ptr) nounwind
+  %arrayidx15.pre = getelementptr inbounds i8, ptr %tmp1, i32 12
   br label %if.end
 
 if.else:                                          ; preds = %entry
 ; CHECK: addiu $4, $[[T0]], 12
 
-  %add.ptr5 = getelementptr inbounds i8, i8* %tmp1, i32 12
-  %2 = bitcast i8* %add.ptr5 to i32*
-  call void @foo3(i32* %2) nounwind
+  %add.ptr5 = getelementptr inbounds i8, ptr %tmp1, i32 12
+  call void @foo3(ptr %add.ptr5) nounwind
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
 ; CHECK: lw  $5, 0($[[T0]])
 ; CHECK: lw  $25, %call16(printf)
 
-  %.pre-phi = phi i32* [ %2, %if.else ], [ %.pre, %if.then ]
-  %tmp7 = load i32, i32* %0, align 4
-  %arrayidx9 = getelementptr inbounds i8, i8* %tmp1, i32 4
-  %3 = bitcast i8* %arrayidx9 to i32*
-  %tmp10 = load i32, i32* %3, align 4
-  %arrayidx12 = getelementptr inbounds i8, i8* %tmp1, i32 8
-  %4 = bitcast i8* %arrayidx12 to i32*
-  %tmp13 = load i32, i32* %4, align 4
-  %tmp16 = load i32, i32* %.pre-phi, align 4
-  %arrayidx18 = getelementptr inbounds i8, i8* %tmp1, i32 16
-  %5 = bitcast i8* %arrayidx18 to i32*
-  %tmp19 = load i32, i32* %5, align 4
-  %arrayidx21 = getelementptr inbounds i8, i8* %tmp1, i32 20
-  %6 = bitcast i8* %arrayidx21 to i32*
-  %tmp22 = load i32, i32* %6, align 4
-  %arrayidx24 = getelementptr inbounds i8, i8* %tmp1, i32 24
-  %7 = bitcast i8* %arrayidx24 to i32*
-  %tmp25 = load i32, i32* %7, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str, i32 0, i32 0), i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
+  %arrayidx15.pre-phi = phi ptr [ %add.ptr5, %if.else ], [ %arrayidx15.pre, %if.then ]
+  %tmp7 = load i32, ptr %tmp1, align 4
+  %arrayidx9 = getelementptr inbounds i8, ptr %tmp1, i32 4
+  %tmp10 = load i32, ptr %arrayidx9, align 4
+  %arrayidx12 = getelementptr inbounds i8, ptr %tmp1, i32 8
+  %tmp13 = load i32, ptr %arrayidx12, align 4
+  %tmp16 = load i32, ptr %arrayidx15.pre-phi, align 4
+  %arrayidx18 = getelementptr inbounds i8, ptr %tmp1, i32 16
+  %tmp19 = load i32, ptr %arrayidx18, align 4
+  %arrayidx21 = getelementptr inbounds i8, ptr %tmp1, i32 20
+  %tmp22 = load i32, ptr %arrayidx21, align 4
+  %arrayidx24 = getelementptr inbounds i8, ptr %tmp1, i32 24
+  %tmp25 = load i32, ptr %arrayidx24, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
   ret i32 0
 }
 
-declare void @foo3(i32*)
+declare void @foo3(ptr)
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind

diff --git a/llvm/test/CodeGen/Mips/alloca16.ll b/llvm/test/CodeGen/Mips/alloca16.ll
index d728d3bb0b7bb..b6921d59e94c9 100644
--- a/llvm/test/CodeGen/Mips/alloca16.ll
+++ b/llvm/test/CodeGen/Mips/alloca16.ll
@@ -11,9 +11,9 @@
 define void @temp(i32 %foo) nounwind {
 entry:
   %foo.addr = alloca i32, align 4
-  store i32 %foo, i32* %foo.addr, align 4
-  %0 = load i32, i32* %foo.addr, align 4
-  store i32 %0, i32* @t, align 4
+  store i32 %foo, ptr %foo.addr, align 4
+  %0 = load i32, ptr %foo.addr, align 4
+  store i32 %0, ptr @t, align 4
   ret void
 }
 
@@ -26,50 +26,49 @@ entry:
 ; 16:	subu	$[[REGISTER:[0-9]+]], ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	move	$sp, $[[REGISTER]]
   %sssi = alloca i32, align 4
-  %ip = alloca i32*, align 4
+  %ip = alloca ptr, align 4
   %sssj = alloca i32, align 4
-  %0 = load i32, i32* @iiii, align 4
-  store i32 %0, i32* %sssi, align 4
-  %1 = load i32, i32* @kkkk, align 4
+  %0 = load i32, ptr @iiii, align 4
+  store i32 %0, ptr %sssi, align 4
+  %1 = load i32, ptr @kkkk, align 4
   %mul = mul nsw i32 %1, 100
   %2 = alloca i8, i32 %mul
-  %3 = bitcast i8* %2 to i32*
-  store i32* %3, i32** %ip, align 4
-  %4 = load i32, i32* @jjjj, align 4
-  store i32 %4, i32* %sssj, align 4
-  %5 = load i32, i32* @jjjj, align 4
-  %6 = load i32, i32* @iiii, align 4
-  %7 = load i32*, i32** %ip, align 4
-  %arrayidx = getelementptr inbounds i32, i32* %7, i32 %6
-  store i32 %5, i32* %arrayidx, align 4
-  %8 = load i32, i32* @kkkk, align 4
-  %9 = load i32, i32* @jjjj, align 4
-  %10 = load i32*, i32** %ip, align 4
-  %arrayidx1 = getelementptr inbounds i32, i32* %10, i32 %9
-  store i32 %8, i32* %arrayidx1, align 4
-  %11 = load i32, i32* @iiii, align 4
-  %12 = load i32, i32* @kkkk, align 4
-  %13 = load i32*, i32** %ip, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32* %13, i32 %12
-  store i32 %11, i32* %arrayidx2, align 4
-  %14 = load i32*, i32** %ip, align 4
-  %arrayidx3 = getelementptr inbounds i32, i32* %14, i32 25
-  %15 = load i32, i32* %arrayidx3, align 4
-  store i32 %15, i32* @riii, align 4
-  %16 = load i32*, i32** %ip, align 4
-  %arrayidx4 = getelementptr inbounds i32, i32* %16, i32 35
-  %17 = load i32, i32* %arrayidx4, align 4
-  store i32 %17, i32* @rjjj, align 4
-  %18 = load i32*, i32** %ip, align 4
-  %arrayidx5 = getelementptr inbounds i32, i32* %18, i32 100
-  %19 = load i32, i32* %arrayidx5, align 4
-  store i32 %19, i32* @rkkk, align 4
-  %20 = load i32, i32* @t, align 4
-  %21 = load i32*, i32** %ip, align 4
-  %arrayidx6 = getelementptr inbounds i32, i32* %21, i32 %20
-  %22 = load i32, i32* %arrayidx6, align 4
+  store ptr %2, ptr %ip, align 4
+  %3 = load i32, ptr @jjjj, align 4
+  store i32 %3, ptr %sssj, align 4
+  %4 = load i32, ptr @jjjj, align 4
+  %5 = load i32, ptr @iiii, align 4
+  %6 = load ptr, ptr %ip, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %6, i32 %5
+  store i32 %4, ptr %arrayidx, align 4
+  %7 = load i32, ptr @kkkk, align 4
+  %8 = load i32, ptr @jjjj, align 4
+  %9 = load ptr, ptr %ip, align 4
+  %arrayidx1 = getelementptr inbounds i32, ptr %9, i32 %8
+  store i32 %7, ptr %arrayidx1, align 4
+  %10 = load i32, ptr @iiii, align 4
+  %11 = load i32, ptr @kkkk, align 4
+  %12 = load ptr, ptr %ip, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %12, i32 %11
+  store i32 %10, ptr %arrayidx2, align 4
+  %13 = load ptr, ptr %ip, align 4
+  %arrayidx3 = getelementptr inbounds i32, ptr %13, i32 25
+  %14 = load i32, ptr %arrayidx3, align 4
+  store i32 %14, ptr @riii, align 4
+  %15 = load ptr, ptr %ip, align 4
+  %arrayidx4 = getelementptr inbounds i32, ptr %15, i32 35
+  %16 = load i32, ptr %arrayidx4, align 4
+  store i32 %16, ptr @rjjj, align 4
+  %17 = load ptr, ptr %ip, align 4
+  %arrayidx5 = getelementptr inbounds i32, ptr %17, i32 100
+  %18 = load i32, ptr %arrayidx5, align 4
+  store i32 %18, ptr @rkkk, align 4
+  %19 = load i32, ptr @t, align 4
+  %20 = load ptr, ptr %ip, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr %20, i32 %19
+  %21 = load i32, ptr %arrayidx6, align 4
 ; 16: 	addiu $sp, -16
-  call void @temp(i32 %22)
+  call void @temp(i32 %21)
 ; 16: 	addiu $sp, 16
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/and1.ll b/llvm/test/CodeGen/Mips/and1.ll
index a2bf4f080a076..7b5380fab2fdf 100644
--- a/llvm/test/CodeGen/Mips/and1.ll
+++ b/llvm/test/CodeGen/Mips/and1.ll
@@ -6,12 +6,12 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @x, align 4
-  %1 = load i32, i32* @y, align 4
+  %0 = load i32, ptr @x, align 4
+  %1 = load i32, ptr @y, align 4
   %and = and i32 %0, %1
 ; 16:	and	${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %and)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff --git a/llvm/test/CodeGen/Mips/atomic-min-max-64.ll b/llvm/test/CodeGen/Mips/atomic-min-max-64.ll
index 6b71fe6e195a7..5273f499cedec 100644
--- a/llvm/test/CodeGen/Mips/atomic-min-max-64.ll
+++ b/llvm/test/CodeGen/Mips/atomic-min-max-64.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -march=mips64 -O0 -mcpu=mips64r6 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPSR6
 ; RUN: llc -march=mips64el -O0 -mcpu=mips64r6 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPSR6
 
-define i64 @test_max(i64* nocapture %ptr, i64 signext %val) {
+define i64 @test_max(ptr nocapture %ptr, i64 signext %val) {
 ; MIPS-LABEL: test_max:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    sync
@@ -38,11 +38,11 @@ define i64 @test_max(i64* nocapture %ptr, i64 signext %val) {
 ; MIPSR6-NEXT:    sync
 ; MIPSR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw max i64* %ptr, i64 %val seq_cst
+  %0 = atomicrmw max ptr %ptr, i64 %val seq_cst
   ret i64 %0
 }
 
-define i64 @test_min(i64* nocapture %ptr, i64 signext %val) {
+define i64 @test_min(ptr nocapture %ptr, i64 signext %val) {
 ; MIPS-LABEL: test_min:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    sync
@@ -76,11 +76,11 @@ define i64 @test_min(i64* nocapture %ptr, i64 signext %val) {
 ; MIPSR6-NEXT:    sync
 ; MIPSR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw min i64* %ptr, i64 %val seq_cst
+  %0 = atomicrmw min ptr %ptr, i64 %val seq_cst
   ret i64 %0
 }
 
-define i64 @test_umax(i64* nocapture %ptr, i64 zeroext %val) {
+define i64 @test_umax(ptr nocapture %ptr, i64 zeroext %val) {
 ; MIPS-LABEL: test_umax:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    sync
@@ -114,11 +114,11 @@ define i64 @test_umax(i64* nocapture %ptr, i64 zeroext %val) {
 ; MIPSR6-NEXT:    sync
 ; MIPSR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw umax i64* %ptr, i64 %val seq_cst
+  %0 = atomicrmw umax ptr %ptr, i64 %val seq_cst
   ret i64 %0
 }
 
-define i64 @test_umin(i64* nocapture %ptr, i64 zeroext %val) {
+define i64 @test_umin(ptr nocapture %ptr, i64 zeroext %val) {
 ; MIPS-LABEL: test_umin:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    sync
@@ -152,7 +152,7 @@ define i64 @test_umin(i64* nocapture %ptr, i64 zeroext %val) {
 ; MIPSR6-NEXT:    sync
 ; MIPSR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw umin i64* %ptr, i64 %val seq_cst
+  %0 = atomicrmw umin ptr %ptr, i64 %val seq_cst
   ret i64 %0
 }
 

diff --git a/llvm/test/CodeGen/Mips/atomic-min-max.ll b/llvm/test/CodeGen/Mips/atomic-min-max.ll
index 8fa95e6d5e4d1..f953c885ea734 100644
--- a/llvm/test/CodeGen/Mips/atomic-min-max.ll
+++ b/llvm/test/CodeGen/Mips/atomic-min-max.ll
@@ -12,7 +12,7 @@
 ; RUN: llc -march=mips64el -O0 -mcpu=mips64r2 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPS64EL
 ; RUN: llc -march=mips64el -O0 -mcpu=mips64r6 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=MIPS64ELR6
 
-define i32 @test_max_32(i32* nocapture %ptr, i32 signext %val) {
+define i32 @test_max_32(ptr nocapture %ptr, i32 signext %val) {
 ; MIPS-LABEL: test_max_32:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    sync
@@ -211,11 +211,11 @@ define i32 @test_max_32(i32* nocapture %ptr, i32 signext %val) {
 ; MIPS64ELR6-NEXT:    sync
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw max i32* %ptr, i32 %val seq_cst
+  %0 = atomicrmw max ptr %ptr, i32 %val seq_cst
   ret i32 %0
 }
 
-define i32 @test_min_32(i32* nocapture %ptr, i32 signext %val) {
+define i32 @test_min_32(ptr nocapture %ptr, i32 signext %val) {
 ; MIPS-LABEL: test_min_32:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    sync
@@ -414,11 +414,11 @@ define i32 @test_min_32(i32* nocapture %ptr, i32 signext %val) {
 ; MIPS64ELR6-NEXT:    sync
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw min i32* %ptr, i32 %val seq_cst
+  %0 = atomicrmw min ptr %ptr, i32 %val seq_cst
   ret i32 %0
 }
 
-define i32 @test_umax_32(i32* nocapture %ptr, i32 signext %val) {
+define i32 @test_umax_32(ptr nocapture %ptr, i32 signext %val) {
 ; MIPS-LABEL: test_umax_32:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    sync
@@ -617,11 +617,11 @@ define i32 @test_umax_32(i32* nocapture %ptr, i32 signext %val) {
 ; MIPS64ELR6-NEXT:    sync
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw umax i32* %ptr, i32 %val seq_cst
+  %0 = atomicrmw umax ptr %ptr, i32 %val seq_cst
   ret i32 %0
 }
 
-define i32 @test_umin_32(i32* nocapture %ptr, i32 signext %val) {
+define i32 @test_umin_32(ptr nocapture %ptr, i32 signext %val) {
 ; MIPS-LABEL: test_umin_32:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    sync
@@ -820,11 +820,11 @@ define i32 @test_umin_32(i32* nocapture %ptr, i32 signext %val) {
 ; MIPS64ELR6-NEXT:    sync
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw umin i32* %ptr, i32 %val seq_cst
+  %0 = atomicrmw umin ptr %ptr, i32 %val seq_cst
   ret i32 %0
 }
 
-define i16 @test_max_16(i16* nocapture %ptr, i16 signext %val) {
+define i16 @test_max_16(ptr nocapture %ptr, i16 signext %val) {
 ; MIPS-LABEL: test_max_16:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    addiu $sp, $sp, -8
@@ -1301,11 +1301,11 @@ define i16 @test_max_16(i16* nocapture %ptr, i16 signext %val) {
 ; MIPS64ELR6-NEXT:    daddiu $sp, $sp, 16
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw max i16* %ptr, i16 %val seq_cst
+  %0 = atomicrmw max ptr %ptr, i16 %val seq_cst
   ret i16 %0
 }
 
-define i16 @test_min_16(i16* nocapture %ptr, i16 signext %val) {
+define i16 @test_min_16(ptr nocapture %ptr, i16 signext %val) {
 ; MIPS-LABEL: test_min_16:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    addiu $sp, $sp, -8
@@ -1782,11 +1782,11 @@ define i16 @test_min_16(i16* nocapture %ptr, i16 signext %val) {
 ; MIPS64ELR6-NEXT:    daddiu $sp, $sp, 16
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw min i16* %ptr, i16 %val seq_cst
+  %0 = atomicrmw min ptr %ptr, i16 %val seq_cst
   ret i16 %0
 }
 
-define i16 @test_umax_16(i16* nocapture %ptr, i16 signext %val) {
+define i16 @test_umax_16(ptr nocapture %ptr, i16 signext %val) {
 ; MIPS-LABEL: test_umax_16:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    addiu $sp, $sp, -8
@@ -2263,11 +2263,11 @@ define i16 @test_umax_16(i16* nocapture %ptr, i16 signext %val) {
 ; MIPS64ELR6-NEXT:    daddiu $sp, $sp, 16
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw umax i16* %ptr, i16 %val seq_cst
+  %0 = atomicrmw umax ptr %ptr, i16 %val seq_cst
   ret i16 %0
 }
 
-define i16 @test_umin_16(i16* nocapture %ptr, i16 signext %val) {
+define i16 @test_umin_16(ptr nocapture %ptr, i16 signext %val) {
 ; MIPS-LABEL: test_umin_16:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    addiu $sp, $sp, -8
@@ -2744,12 +2744,12 @@ define i16 @test_umin_16(i16* nocapture %ptr, i16 signext %val) {
 ; MIPS64ELR6-NEXT:    daddiu $sp, $sp, 16
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw umin i16* %ptr, i16 %val seq_cst
+  %0 = atomicrmw umin ptr %ptr, i16 %val seq_cst
   ret i16 %0
 }
 
 
-define i8 @test_max_8(i8* nocapture %ptr, i8 signext %val) {
+define i8 @test_max_8(ptr nocapture %ptr, i8 signext %val) {
 ; MIPS-LABEL: test_max_8:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    addiu $sp, $sp, -8
@@ -3226,11 +3226,11 @@ define i8 @test_max_8(i8* nocapture %ptr, i8 signext %val) {
 ; MIPS64ELR6-NEXT:    daddiu $sp, $sp, 16
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw max i8* %ptr, i8 %val seq_cst
+  %0 = atomicrmw max ptr %ptr, i8 %val seq_cst
   ret i8 %0
 }
 
-define i8 @test_min_8(i8* nocapture %ptr, i8 signext %val) {
+define i8 @test_min_8(ptr nocapture %ptr, i8 signext %val) {
 ; MIPS-LABEL: test_min_8:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    addiu $sp, $sp, -8
@@ -3707,11 +3707,11 @@ define i8 @test_min_8(i8* nocapture %ptr, i8 signext %val) {
 ; MIPS64ELR6-NEXT:    daddiu $sp, $sp, 16
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw min i8* %ptr, i8 %val seq_cst
+  %0 = atomicrmw min ptr %ptr, i8 %val seq_cst
   ret i8 %0
 }
 
-define i8 @test_umax_8(i8* nocapture %ptr, i8 signext %val) {
+define i8 @test_umax_8(ptr nocapture %ptr, i8 signext %val) {
 ; MIPS-LABEL: test_umax_8:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    addiu $sp, $sp, -8
@@ -4188,11 +4188,11 @@ define i8 @test_umax_8(i8* nocapture %ptr, i8 signext %val) {
 ; MIPS64ELR6-NEXT:    daddiu $sp, $sp, 16
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw umax i8* %ptr, i8 %val seq_cst
+  %0 = atomicrmw umax ptr %ptr, i8 %val seq_cst
   ret i8 %0
 }
 
-define i8 @test_umin_8(i8* nocapture %ptr, i8 signext %val) {
+define i8 @test_umin_8(ptr nocapture %ptr, i8 signext %val) {
 ; MIPS-LABEL: test_umin_8:
 ; MIPS:       # %bb.0: # %entry
 ; MIPS-NEXT:    addiu $sp, $sp, -8
@@ -4669,6 +4669,6 @@ define i8 @test_umin_8(i8* nocapture %ptr, i8 signext %val) {
 ; MIPS64ELR6-NEXT:    daddiu $sp, $sp, 16
 ; MIPS64ELR6-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw umin i8* %ptr, i8 %val seq_cst
+  %0 = atomicrmw umin ptr %ptr, i8 %val seq_cst
   ret i8 %0
 }

diff --git a/llvm/test/CodeGen/Mips/atomic.ll b/llvm/test/CodeGen/Mips/atomic.ll
index c8b67eda156f8..eaf99cc7023a3 100644
--- a/llvm/test/CodeGen/Mips/atomic.ll
+++ b/llvm/test/CodeGen/Mips/atomic.ll
@@ -285,7 +285,7 @@ define i32 @AtomicLoadAdd32(i32 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw add i32* @x, i32 %incr monotonic
+  %0 = atomicrmw add ptr @x, i32 %incr monotonic
   ret i32 %0
 
 }
@@ -541,7 +541,7 @@ define i32 @AtomicLoadSub32(i32 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw sub i32* @x, i32 %incr monotonic
+  %0 = atomicrmw sub ptr @x, i32 %incr monotonic
   ret i32 %0
 
 }
@@ -797,7 +797,7 @@ define i32 @AtomicLoadXor32(i32 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw xor i32* @x, i32 %incr monotonic
+  %0 = atomicrmw xor ptr @x, i32 %incr monotonic
   ret i32 %0
 }
 
@@ -1052,7 +1052,7 @@ define i32 @AtomicLoadOr32(i32 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw or i32* @x, i32 %incr monotonic
+  %0 = atomicrmw or ptr @x, i32 %incr monotonic
   ret i32 %0
 }
 
@@ -1307,7 +1307,7 @@ define i32 @AtomicLoadAnd32(i32 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw and i32* @x, i32 %incr monotonic
+  %0 = atomicrmw and ptr @x, i32 %incr monotonic
   ret i32 %0
 }
 
@@ -1577,7 +1577,7 @@ define i32 @AtomicLoadNand32(i32 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw nand i32* @x, i32 %incr monotonic
+  %0 = atomicrmw nand ptr @x, i32 %incr monotonic
   ret i32 %0
 
 }
@@ -1871,9 +1871,9 @@ define i32 @AtomicSwap32(i32 signext %newval) nounwind {
 ; MIPS32EB-NEXT:    addiu $sp, $sp, 8
 entry:
   %newval.addr = alloca i32, align 4
-  store i32 %newval, i32* %newval.addr, align 4
-  %tmp = load i32, i32* %newval.addr, align 4
-  %0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
+  store i32 %newval, ptr %newval.addr, align 4
+  %tmp = load i32, ptr %newval.addr, align 4
+  %0 = atomicrmw xchg ptr @x, i32 %tmp monotonic
   ret i32 %0
 
 }
@@ -2227,9 +2227,9 @@ define i32 @AtomicCmpSwap32(i32 signext %oldval, i32 signext %newval) nounwind {
 ; MIPS32EB-NEXT:    addiu $sp, $sp, 8
 entry:
   %newval.addr = alloca i32, align 4
-  store i32 %newval, i32* %newval.addr, align 4
-  %tmp = load i32, i32* %newval.addr, align 4
-  %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic monotonic
+  store i32 %newval, ptr %newval.addr, align 4
+  %tmp = load i32, ptr %newval.addr, align 4
+  %0 = cmpxchg ptr @x, i32 %oldval, i32 %tmp monotonic monotonic
   %1 = extractvalue { i32, i1 } %0, 0
   ret i32 %1
 
@@ -2737,7 +2737,7 @@ define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw add i8* @y, i8 %incr monotonic
+  %0 = atomicrmw add ptr @y, i8 %incr monotonic
   ret i8 %0
 }
 
@@ -3241,7 +3241,7 @@ define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw sub i8* @y, i8 %incr monotonic
+  %0 = atomicrmw sub ptr @y, i8 %incr monotonic
   ret i8 %0
 
 }
@@ -3761,7 +3761,7 @@ define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw nand i8* @y, i8 %incr monotonic
+  %0 = atomicrmw nand ptr @y, i8 %incr monotonic
   ret i8 %0
 
 }
@@ -4251,7 +4251,7 @@ define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw xchg i8* @y, i8 %newval monotonic
+  %0 = atomicrmw xchg ptr @y, i8 %newval monotonic
   ret i8 %0
 }
 
@@ -4832,12 +4832,12 @@ define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwi
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %pair0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic
+  %pair0 = cmpxchg ptr @y, i8 %oldval, i8 %newval monotonic monotonic
   %0 = extractvalue { i8, i1 } %pair0, 0
   ret i8 %0
 }
 
-define i1 @AtomicCmpSwapRes8(i8* %ptr, i8 signext %oldval, i8 signext %newval) nounwind {
+define i1 @AtomicCmpSwapRes8(ptr %ptr, i8 signext %oldval, i8 signext %newval) nounwind {
 ; MIPS32-LABEL: AtomicCmpSwapRes8:
 ; MIPS32:       # %bb.0: # %entry
 ; MIPS32-NEXT:    addiu $1, $zero, -4
@@ -5384,7 +5384,7 @@ define i1 @AtomicCmpSwapRes8(i8* %ptr, i8 signext %oldval, i8 signext %newval) n
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    sltiu $2, $1, 1
 entry:
-  %0 = cmpxchg i8* %ptr, i8 %oldval, i8 %newval monotonic monotonic
+  %0 = cmpxchg ptr %ptr, i8 %oldval, i8 %newval monotonic monotonic
   %1 = extractvalue { i8, i1 } %0, 1
   ret i1 %1
 ; FIXME: -march=mips produces a redundant sign extension here...
@@ -5895,7 +5895,7 @@ define signext i16 @AtomicLoadAdd16(i16 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw add i16* @z, i16 %incr monotonic
+  %0 = atomicrmw add ptr @z, i16 %incr monotonic
   ret i16 %0
 
 }
@@ -5905,7 +5905,7 @@ entry:
 ; value.
 ; The rest of the functions here are testing the atomic expansion, so
 ; we just match the end of the function.
-define {i16, i1} @foo(i16* %addr, i16 %l, i16 %r, i16 %new) {
+define {i16, i1} @foo(ptr %addr, i16 %l, i16 %r, i16 %new) {
 ; MIPS32-LABEL: foo:
 ; MIPS32:       # %bb.0:
 ; MIPS32-NEXT:    addu $1, $5, $6
@@ -6514,7 +6514,7 @@ define {i16, i1} @foo(i16* %addr, i16 %l, i16 %r, i16 %new) {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
   %desired = add i16 %l, %r
-  %res = cmpxchg i16* %addr, i16 %desired, i16 %new seq_cst seq_cst
+  %res = cmpxchg ptr %addr, i16 %desired, i16 %new seq_cst seq_cst
   ret {i16, i1} %res
 }
 
@@ -6797,7 +6797,7 @@ define i32 @CheckSync(i32 signext %v) nounwind noinline {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw add i32* @countsint, i32 %v seq_cst
+  %0 = atomicrmw add ptr @countsint, i32 %v seq_cst
   ret i32 %0
 }
 
@@ -7208,7 +7208,7 @@ define i32 @zeroreg() nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %pair0 = cmpxchg i32* @a, i32 1, i32 0 seq_cst seq_cst
+  %pair0 = cmpxchg ptr @a, i32 1, i32 0 seq_cst seq_cst
   %0 = extractvalue { i32, i1 } %pair0, 0
   %1 = icmp eq i32 %0, 1
   %conv = zext i1 %1 to i32
@@ -7483,7 +7483,7 @@ define i32 @AtomicLoadAdd32_OffGt9Bit(i32 signext %incr) nounwind {
 ; MIPS32EB-NEXT:    jr $ra
 ; MIPS32EB-NEXT:    nop
 entry:
-  %0 = atomicrmw add i32* getelementptr(i32, i32* @x, i32 256), i32 %incr monotonic
+  %0 = atomicrmw add ptr getelementptr(i32, ptr @x, i32 256), i32 %incr monotonic
   ret i32 %0
 
 }

diff  --git a/llvm/test/CodeGen/Mips/atomic64.ll b/llvm/test/CodeGen/Mips/atomic64.ll
index d27c9ac42e059..a454c442b62bd 100644
--- a/llvm/test/CodeGen/Mips/atomic64.ll
+++ b/llvm/test/CodeGen/Mips/atomic64.ll
@@ -174,7 +174,7 @@ define i64 @AtomicLoadAdd(i64 signext %incr) nounwind {
 ; MIPS64EB-NEXT:    jr $ra
 ; MIPS64EB-NEXT:    nop
 entry:
-  %0 = atomicrmw add i64* @x, i64 %incr monotonic
+  %0 = atomicrmw add ptr @x, i64 %incr monotonic
   ret i64 %0
 
 }
@@ -331,7 +331,7 @@ define i64 @AtomicLoadSub(i64 signext %incr) nounwind {
 ; MIPS64EB-NEXT:    jr $ra
 ; MIPS64EB-NEXT:    nop
 entry:
-  %0 = atomicrmw sub i64* @x, i64 %incr monotonic
+  %0 = atomicrmw sub ptr @x, i64 %incr monotonic
   ret i64 %0
 
 }
@@ -488,7 +488,7 @@ define i64 @AtomicLoadAnd(i64 signext %incr) nounwind {
 ; MIPS64EB-NEXT:    jr $ra
 ; MIPS64EB-NEXT:    nop
 entry:
-  %0 = atomicrmw and i64* @x, i64 %incr monotonic
+  %0 = atomicrmw and ptr @x, i64 %incr monotonic
   ret i64 %0
 
 }
@@ -645,7 +645,7 @@ define i64 @AtomicLoadOr(i64 signext %incr) nounwind {
 ; MIPS64EB-NEXT:    jr $ra
 ; MIPS64EB-NEXT:    nop
 entry:
-  %0 = atomicrmw or i64* @x, i64 %incr monotonic
+  %0 = atomicrmw or ptr @x, i64 %incr monotonic
   ret i64 %0
 
 }
@@ -802,7 +802,7 @@ define i64 @AtomicLoadXor(i64 signext %incr) nounwind {
 ; MIPS64EB-NEXT:    jr $ra
 ; MIPS64EB-NEXT:    nop
 entry:
-  %0 = atomicrmw xor i64* @x, i64 %incr monotonic
+  %0 = atomicrmw xor ptr @x, i64 %incr monotonic
   ret i64 %0
 
 }
@@ -968,7 +968,7 @@ define i64 @AtomicLoadNand(i64 signext %incr) nounwind {
 ; MIPS64EB-NEXT:    jr $ra
 ; MIPS64EB-NEXT:    nop
 entry:
-  %0 = atomicrmw nand i64* @x, i64 %incr monotonic
+  %0 = atomicrmw nand ptr @x, i64 %incr monotonic
   ret i64 %0
 
 }
@@ -1146,9 +1146,9 @@ define i64 @AtomicSwap64(i64 signext %newval) nounwind {
 ; MIPS64EB-NEXT:    daddiu $sp, $sp, 16
 entry:
   %newval.addr = alloca i64, align 4
-  store i64 %newval, i64* %newval.addr, align 4
-  %tmp = load i64, i64* %newval.addr, align 4
-  %0 = atomicrmw xchg i64* @x, i64 %tmp monotonic
+  store i64 %newval, ptr %newval.addr, align 4
+  %tmp = load i64, ptr %newval.addr, align 4
+  %0 = atomicrmw xchg ptr @x, i64 %tmp monotonic
   ret i64 %0
 
 }
@@ -1360,9 +1360,9 @@ define i64 @AtomicCmpSwap64(i64 signext %oldval, i64 signext %newval) nounwind {
 ; MIPS64EB-NEXT:    daddiu $sp, $sp, 16
 entry:
   %newval.addr = alloca i64, align 4
-  store i64 %newval, i64* %newval.addr, align 4
-  %tmp = load i64, i64* %newval.addr, align 4
-  %0 = cmpxchg i64* @x, i64 %oldval, i64 %tmp monotonic monotonic
+  store i64 %newval, ptr %newval.addr, align 4
+  %tmp = load i64, ptr %newval.addr, align 4
+  %0 = cmpxchg ptr @x, i64 %oldval, i64 %tmp monotonic monotonic
   %1 = extractvalue { i64, i1 } %0, 0
   ret i64 %1
 

diff  --git a/llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll b/llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll
index cf8cf7e539318..97431b6f90392 100644
--- a/llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll
+++ b/llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll
@@ -6,7 +6,7 @@
 ; RUN: llc -O0 -mtriple=mips64el-unknown-linux-gnu -mcpu=mips64r2 -target-abi=n64 < %s -filetype=asm -o - \
 ; RUN:   | FileCheck -check-prefixes=N64 %s
 
-@sym = external global i32 *
+@sym = external global ptr
 
 define void @foo(i32 %new, i32 %old) {
 ; O32-LABEL: foo:
@@ -84,8 +84,8 @@ define void @foo(i32 %new, i32 %old) {
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    nop
 entry:
-  %0 = load i32 *, i32 ** @sym
-  cmpxchg i32 * %0, i32 %new, i32 %old seq_cst seq_cst
+  %0 = load ptr, ptr @sym
+  cmpxchg ptr %0, i32 %new, i32 %old seq_cst seq_cst
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/atomicops.ll b/llvm/test/CodeGen/Mips/atomicops.ll
index 18a48ca5023bc..a67b6206c37eb 100644
--- a/llvm/test/CodeGen/Mips/atomicops.ll
+++ b/llvm/test/CodeGen/Mips/atomicops.ll
@@ -2,9 +2,9 @@
 
 @.str = private unnamed_addr constant [8 x i8] c"%d, %d\0A\00", align 1
 
-define i32 @foo(i32* %mem, i32 %val, i32 %c) nounwind {
+define i32 @foo(ptr %mem, i32 %val, i32 %c) nounwind {
 entry:
-  %0 = atomicrmw add i32* %mem, i32 %val seq_cst
+  %0 = atomicrmw add ptr %mem, i32 %val seq_cst
   %add = add nsw i32 %0, %c
   ret i32 %add
 ; 16-LABEL: foo:
@@ -15,18 +15,18 @@ entry:
 define i32 @main() nounwind {
 entry:
   %x = alloca i32, align 4
-  store volatile i32 0, i32* %x, align 4
-  %0 = atomicrmw add i32* %x, i32 1 seq_cst
+  store volatile i32 0, ptr %x, align 4
+  %0 = atomicrmw add ptr %x, i32 1 seq_cst
   %add.i = add nsw i32 %0, 2
-  %1 = load volatile i32, i32* %x, align 4
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind
-  %pair = cmpxchg i32* %x, i32 1, i32 2 seq_cst seq_cst
+  %1 = load volatile i32, ptr %x, align 4
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %add.i, i32 %1) nounwind
+  %pair = cmpxchg ptr %x, i32 1, i32 2 seq_cst seq_cst
   %2 = extractvalue { i32, i1 } %pair, 0
-  %3 = load volatile i32, i32* %x, align 4
-  %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind
-  %4 = atomicrmw xchg i32* %x, i32 1 seq_cst
-  %5 = load volatile i32, i32* %x, align 4
-  %call3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %4, i32 %5) nounwind
+  %3 = load volatile i32, ptr %x, align 4
+  %call2 = call i32 (ptr, ...) @printf(ptr @.str, i32 %2, i32 %3) nounwind
+  %4 = atomicrmw xchg ptr %x, i32 1 seq_cst
+  %5 = load volatile i32, ptr %x, align 4
+  %call3 = call i32 (ptr, ...) @printf(ptr @.str, i32 %4, i32 %5) nounwind
 ; 16-LABEL: main:
 ; 16:	lw	${{[0-9]+}}, %call16(__sync_synchronize)(${{[0-9]+}})
 ; 16: 	lw	${{[0-9]+}}, %call16(__sync_fetch_and_add_4)(${{[0-9]+}})
@@ -36,6 +36,6 @@ entry:
   ret i32 0
 }
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
 
 

diff  --git a/llvm/test/CodeGen/Mips/beqzc.ll b/llvm/test/CodeGen/Mips/beqzc.ll
index ebd16cb8204f3..11e009bcc79e2 100644
--- a/llvm/test/CodeGen/Mips/beqzc.ll
+++ b/llvm/test/CodeGen/Mips/beqzc.ll
@@ -6,10 +6,10 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %cmp = icmp eq i32 %0, 0
   %. = select i1 %cmp, i32 10, i32 55
-  store i32 %., i32* @j, align 4
+  store i32 %., ptr @j, align 4
 ; cond-b-short: 	beqz	${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}}  # 16 bit inst
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Mips/beqzc1.ll b/llvm/test/CodeGen/Mips/beqzc1.ll
index 488ba928851b7..ad41ae271bb42 100644
--- a/llvm/test/CodeGen/Mips/beqzc1.ll
+++ b/llvm/test/CodeGen/Mips/beqzc1.ll
@@ -6,13 +6,13 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
 
 ; cond-b-short: 	bnez	${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]+}}  # 16 bit inst
 if.then:                                          ; preds = %entry
-  store i32 10, i32* @j, align 4
+  store i32 10, ptr @j, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff  --git a/llvm/test/CodeGen/Mips/biggot.ll b/llvm/test/CodeGen/Mips/biggot.ll
index aed1c487e7dba..1080922ade809 100644
--- a/llvm/test/CodeGen/Mips/biggot.ll
+++ b/llvm/test/CodeGen/Mips/biggot.ll
@@ -27,7 +27,7 @@ entry:
 ; N64-DAG: ld  ${{[0-9]+}}, %got_lo(v0)($[[R1]])
 ; N64-DAG: ld  ${{[0-9]+}}, %call_lo(foo0)($[[R3]])
 
-  %0 = load i32, i32* @v0, align 4
+  %0 = load i32, ptr @v0, align 4
   tail call void @foo0(i32 %0) nounwind
   ret void
 }
@@ -36,7 +36,7 @@ declare void @foo0(i32)
 
 ; call to external function.
 
-define void @foo2(i32* nocapture %d, i32* nocapture %s, i32 %n) nounwind {
+define void @foo2(ptr nocapture %d, ptr nocapture %s, i32 %n) nounwind {
 entry:
 ; O32-LABEL: foo2:
 ; O32: lui $[[R2:[0-9]+]], %call_hi(memcpy)
@@ -48,10 +48,8 @@ entry:
 ; N64: daddu  $[[R3:[0-9]+]], $[[R2]], ${{[a-z0-9]+}}
 ; N64: ld  ${{[0-9]+}}, %call_lo(memcpy)($[[R3]])
 
-  %0 = bitcast i32* %d to i8*
-  %1 = bitcast i32* %s to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 %n, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 %d, ptr align 4 %s, i32 %n, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind

diff  --git a/llvm/test/CodeGen/Mips/blockaddr.ll b/llvm/test/CodeGen/Mips/blockaddr.ll
index 120323711bf33..b1ab9a171eb9c 100644
--- a/llvm/test/CodeGen/Mips/blockaddr.ll
+++ b/llvm/test/CodeGen/Mips/blockaddr.ll
@@ -13,11 +13,11 @@
 ; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips32 -mattr=+mips16 \
 ; RUN:     -relocation-model=static < %s | FileCheck %s -check-prefix=STATIC-MIPS16
 
-@reg = common global i8* null, align 4
+@reg = common global ptr null, align 4
 
-define i8* @dummy(i8* %x) nounwind readnone noinline {
+define ptr @dummy(ptr %x) nounwind readnone noinline {
 entry:
-  ret i8* %x
+  ret ptr %x
 }
 
 ; PIC-O32: lw  $[[R0:[0-9]+]], %got($tmp[[T0:[0-9]+]])
@@ -61,14 +61,14 @@ entry:
 
 define void @f() nounwind {
 entry:
-  %call = tail call i8* @dummy(i8* blockaddress(@f, %baz))
-  indirectbr i8* %call, [label %baz, label %foo]
+  %call = tail call ptr @dummy(ptr blockaddress(@f, %baz))
+  indirectbr ptr %call, [label %baz, label %foo]
 
 foo:                                              ; preds = %foo, %entry
-  store i8* blockaddress(@f, %foo), i8** @reg, align 4
+  store ptr blockaddress(@f, %foo), ptr @reg, align 4
   br label %foo
 
 baz:                                              ; preds = %entry
-  store i8* null, i8** @reg, align 4
+  store ptr null, ptr @reg, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/branch-relaxation-with-hazard.ll b/llvm/test/CodeGen/Mips/branch-relaxation-with-hazard.ll
index 530c3a7d24d44..ce0f2b0268d7b 100644
--- a/llvm/test/CodeGen/Mips/branch-relaxation-with-hazard.ll
+++ b/llvm/test/CodeGen/Mips/branch-relaxation-with-hazard.ll
@@ -4,7 +4,7 @@
 declare i32 @boo(...)
 declare i32 @foo(...)
 
-define i32 @main(i32 signext %argc, i8** %argv) {
+define i32 @main(i32 signext %argc, ptr %argv) {
 ; CHECK: main:
 ; CHECK: # %bb.1:
 ; CHECK-PIC: addiu
@@ -29,38 +29,38 @@ define i32 @main(i32 signext %argc, i8** %argv) {
 entry:
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
-  %argv.addr = alloca i8**, align 4
-  store i32 0, i32* %retval, align 4
-  store i32 %argc, i32* %argc.addr, align 4
-  store i8** %argv, i8*** %argv.addr, align 4
-  %0 = load i32, i32* %argc.addr, align 4
+  %argv.addr = alloca ptr, align 4
+  store i32 0, ptr %retval, align 4
+  store i32 %argc, ptr %argc.addr, align 4
+  store ptr %argv, ptr %argv.addr, align 4
+  %0 = load i32, ptr %argc.addr, align 4
   %cmp = icmp sgt i32 %0, 1
   br i1 %cmp, label %if.then, label %if.end4
 
 if.then:
   call void asm sideeffect ".space 10", "~{$1}"()
-  %1 = load i32, i32* %argc.addr, align 4
+  %1 = load i32, ptr %argc.addr, align 4
   %cmp1 = icmp sgt i32 %1, 3
   br i1 %cmp1, label %if.then2, label %if.end
 
 if.then2:
   call void asm sideeffect ".space 10", "~{$1}"()
-  %call = call i32 bitcast (i32 (...)* @boo to i32 ()*)()
-  store i32 %call, i32* %retval, align 4
+  %call = call i32 @boo()
+  store i32 %call, ptr %retval, align 4
   br label %return
 
 if.end:
   call void asm sideeffect ".space 4194228", "~{$1}"()
-  %call3 = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
-  store i32 %call3, i32* %retval, align 4
+  %call3 = call i32 @foo()
+  store i32 %call3, ptr %retval, align 4
   br label %return
 
 if.end4:
-  store i32 0, i32* %retval, align 4
+  store i32 0, ptr %retval, align 4
   br label %return
 
 return:
-  %2 = load i32, i32* %retval, align 4
+  %2 = load i32, ptr %retval, align 4
   ret i32 %2
 
 }

diff  --git a/llvm/test/CodeGen/Mips/brconeq.ll b/llvm/test/CodeGen/Mips/brconeq.ll
index 7c3c31e0ec3c4..ba7dc0f8540e6 100644
--- a/llvm/test/CodeGen/Mips/brconeq.ll
+++ b/llvm/test/CodeGen/Mips/brconeq.ll
@@ -6,8 +6,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @j, align 4
   %cmp = icmp eq i32 %0, %1
 ; 16:	cmp	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	bteqz	$[[LABEL:[0-9A-Ba-b_]+]]
@@ -15,7 +15,7 @@ entry:
   br i1 %cmp, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result, align 4
+  store i32 1, ptr @result, align 4
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then

diff  --git a/llvm/test/CodeGen/Mips/brconeqk.ll b/llvm/test/CodeGen/Mips/brconeqk.ll
index 85d257e8d7970..4ee2f772ff68e 100644
--- a/llvm/test/CodeGen/Mips/brconeqk.ll
+++ b/llvm/test/CodeGen/Mips/brconeqk.ll
@@ -5,14 +5,14 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %cmp = icmp eq i32 %0, 10
   br i1 %cmp, label %if.end, label %if.then
 ; 16:	cmpi	${{[0-9]+}}, {{[0-9]+}}
 ; 16:	bteqz	$[[LABEL:[0-9A-Ba-b_]+]]
 ; 16: $[[LABEL]]:
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result, align 4
+  store i32 1, ptr @result, align 4
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then

diff  --git a/llvm/test/CodeGen/Mips/brconeqz.ll b/llvm/test/CodeGen/Mips/brconeqz.ll
index cf1beed49bb43..b8e7d1d12f978 100644
--- a/llvm/test/CodeGen/Mips/brconeqz.ll
+++ b/llvm/test/CodeGen/Mips/brconeqz.ll
@@ -5,13 +5,13 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.end, label %if.then
 ; 16:	beqz	${{[0-9]+}}, $[[LABEL:[0-9A-Ba-b_]+]]
 ; 16: $[[LABEL]]:
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result, align 4
+  store i32 1, ptr @result, align 4
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then

diff  --git a/llvm/test/CodeGen/Mips/brconge.ll b/llvm/test/CodeGen/Mips/brconge.ll
index 910d81c5689a9..38e3a7c3706f5 100644
--- a/llvm/test/CodeGen/Mips/brconge.ll
+++ b/llvm/test/CodeGen/Mips/brconge.ll
@@ -8,8 +8,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @j, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 
@@ -18,16 +18,16 @@ entry:
 ; 16: $[[LABEL]]:
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result1, align 4
+  store i32 1, ptr @result1, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %2 = load i32, i32* @k, align 4
+  %2 = load i32, ptr @k, align 4
   %cmp1 = icmp slt i32 %0, %2
   br i1 %cmp1, label %if.then2, label %if.end3
 
 if.then2:                                         ; preds = %if.end
-  store i32 1, i32* @result1, align 4
+  store i32 1, ptr @result1, align 4
   br label %if.end3
 
 if.end3:                                          ; preds = %if.then2, %if.end

diff  --git a/llvm/test/CodeGen/Mips/brcongt.ll b/llvm/test/CodeGen/Mips/brcongt.ll
index 7dffdb4112118..3231811588fc1 100644
--- a/llvm/test/CodeGen/Mips/brcongt.ll
+++ b/llvm/test/CodeGen/Mips/brcongt.ll
@@ -7,15 +7,15 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @j, align 4
   %cmp = icmp sgt i32 %0, %1
   br i1 %cmp, label %if.end, label %if.then
 ; 16:	slt	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	btnez	$[[LABEL:[0-9A-Ba-b_]+]]
 ; 16: $[[LABEL]]:
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result, align 4
+  store i32 1, ptr @result, align 4
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then

diff  --git a/llvm/test/CodeGen/Mips/brconle.ll b/llvm/test/CodeGen/Mips/brconle.ll
index 4267dc985de3b..e0ade5df23775 100644
--- a/llvm/test/CodeGen/Mips/brconle.ll
+++ b/llvm/test/CodeGen/Mips/brconle.ll
@@ -8,8 +8,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
-  %1 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @j, align 4
+  %1 = load i32, ptr @i, align 4
   %cmp = icmp sgt i32 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 
@@ -18,16 +18,16 @@ entry:
 ; 16: $[[LABEL]]:
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result1, align 4
+  store i32 1, ptr @result1, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
-  %2 = load i32, i32* @k, align 4
+  %2 = load i32, ptr @k, align 4
   %cmp1 = icmp sgt i32 %1, %2
   br i1 %cmp1, label %if.then2, label %if.end3
 
 if.then2:                                         ; preds = %if.end
-  store i32 0, i32* @result1, align 4
+  store i32 0, ptr @result1, align 4
   br label %if.end3
 
 if.end3:                                          ; preds = %if.then2, %if.end

diff  --git a/llvm/test/CodeGen/Mips/brconlt.ll b/llvm/test/CodeGen/Mips/brconlt.ll
index 65f6c347b6710..f3dbb9607eaff 100644
--- a/llvm/test/CodeGen/Mips/brconlt.ll
+++ b/llvm/test/CodeGen/Mips/brconlt.ll
@@ -8,8 +8,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
-  %1 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @j, align 4
+  %1 = load i32, ptr @i, align 4
   %cmp = icmp slt i32 %0, %1
   br i1 %cmp, label %if.end, label %if.then
 
@@ -19,7 +19,7 @@ entry:
 ; 16:     $[[LABEL]]:
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result, align 4
+  store i32 1, ptr @result, align 4
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then

diff  --git a/llvm/test/CodeGen/Mips/brconne.ll b/llvm/test/CodeGen/Mips/brconne.ll
index e0cbe378fe3c6..5c3a0ef343291 100644
--- a/llvm/test/CodeGen/Mips/brconne.ll
+++ b/llvm/test/CodeGen/Mips/brconne.ll
@@ -6,8 +6,8 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
-  %1 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @j, align 4
+  %1 = load i32, ptr @i, align 4
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %if.end
 ; 16:	cmp	${{[0-9]+}}, ${{[0-9]+}}
@@ -16,7 +16,7 @@ entry:
 ; 16: $[[LABEL]]:
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result, align 4
+  store i32 1, ptr @result, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff  --git a/llvm/test/CodeGen/Mips/brconnek.ll b/llvm/test/CodeGen/Mips/brconnek.ll
index 0b9234fe3b9d8..30c32825da52e 100644
--- a/llvm/test/CodeGen/Mips/brconnek.ll
+++ b/llvm/test/CodeGen/Mips/brconnek.ll
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @j, align 4
   %cmp = icmp eq i32 %0, 5
   br i1 %cmp, label %if.then, label %if.end
 
@@ -15,7 +15,7 @@ entry:
 ; 16: $[[LABEL]]:
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result, align 4
+  store i32 1, ptr @result, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff  --git a/llvm/test/CodeGen/Mips/brconnez.ll b/llvm/test/CodeGen/Mips/brconnez.ll
index eafddccdd4c70..5f8b54e9cbb50 100644
--- a/llvm/test/CodeGen/Mips/brconnez.ll
+++ b/llvm/test/CodeGen/Mips/brconnez.ll
@@ -5,7 +5,7 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @j, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end, !prof !1
 
@@ -14,7 +14,7 @@ entry:
 ; 16: $[[LABEL]]:
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @result, align 4
+  store i32 1, ptr @result, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff  --git a/llvm/test/CodeGen/Mips/brdelayslot.ll b/llvm/test/CodeGen/Mips/brdelayslot.ll
index a02836806e74c..79205373d4566 100644
--- a/llvm/test/CodeGen/Mips/brdelayslot.ll
+++ b/llvm/test/CodeGen/Mips/brdelayslot.ll
@@ -56,20 +56,20 @@ declare void @foo4(double)
 
 define void @foo5(i32 %a) nounwind {
 entry:
-  %0 = load i32, i32* @g2, align 4
+  %0 = load i32, ptr @g2, align 4
   %tobool = icmp eq i32 %a, 0
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:
-  %1 = load i32, i32* @g1, align 4
+  %1 = load i32, ptr @g1, align 4
   %add = add nsw i32 %1, %0
-  store i32 %add, i32* @g1, align 4
+  store i32 %add, ptr @g1, align 4
   br label %if.end
 
 if.else:
-  %2 = load i32, i32* @g3, align 4
+  %2 = load i32, ptr @g3, align 4
   %sub = sub nsw i32 %2, %0
-  store i32 %sub, i32* @g3, align 4
+  store i32 %sub, ptr @g3, align 4
   br label %if.end
 
 if.end:
@@ -96,14 +96,14 @@ declare void @foo7(double, float)
 ; STATICO1:      jalr ${{[0-9]+}}
 ; STATICO1-NEXT: sw ${{[0-9]+}}, %lo(g1)
 
-@foo9 = common global void ()* null, align 4
+@foo9 = common global ptr null, align 4
 
 define i32 @foo8(i32 %a) nounwind {
 entry:
-  store i32 %a, i32* @g1, align 4
-  %0 = load void ()*, void ()** @foo9, align 4
+  store i32 %a, ptr @g1, align 4
+  %0 = load ptr, ptr @foo9, align 4
   call void %0() nounwind
-  %1 = load i32, i32* @g1, align 4
+  %1 = load i32, ptr @g1, align 4
   %add = add nsw i32 %1, %a
   ret i32 %add
 }
@@ -121,9 +121,9 @@ define void @foo10() nounwind {
 entry:
   tail call void @foo11() nounwind
   tail call void @foo11() nounwind
-  store i32 0, i32* @g1, align 4
+  store i32 0, ptr @g1, align 4
   tail call void @foo11() nounwind
-  store i32 0, i32* @g1, align 4
+  store i32 0, ptr @g1, align 4
   ret void
 }
 
@@ -138,7 +138,7 @@ declare void @foo11()
 ; SUCCBB:      bnez ${{[0-9]+}}, $BB
 ; SUCCBB-NEXT: addiu
 
-define i32 @succbbs_loop1(i32* nocapture %a, i32 %n) {
+define i32 @succbbs_loop1(ptr nocapture %a, i32 %n) {
 entry:
   %cmp4 = icmp sgt i32 %n, 0
   br i1 %cmp4, label %for.body, label %for.end
@@ -146,8 +146,8 @@ entry:
 for.body:                                         ; preds = %entry, %for.body
   %s.06 = phi i32 [ %add, %for.body ], [ 0, %entry ]
   %i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
-  %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.05
-  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %a, i32 %i.05
+  %0 = load i32, ptr %arrayidx, align 4
   %add = add nsw i32 %0, %s.06
   %inc = add nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, %n

diff  --git a/llvm/test/CodeGen/Mips/brind-tailcall.ll b/llvm/test/CodeGen/Mips/brind-tailcall.ll
index 4982d256f73f7..f30394ace9587 100644
--- a/llvm/test/CodeGen/Mips/brind-tailcall.ll
+++ b/llvm/test/CodeGen/Mips/brind-tailcall.ll
@@ -31,9 +31,9 @@ define void @test1(i32 %a) {
 entry:
   %0 = trunc i32 %a to i1
   %1 = select i1 %0,
-              i8* blockaddress(@test1, %bb),
-              i8* blockaddress(@test1, %bb6)
-  indirectbr i8* %1, [label %bb, label %bb6]
+              ptr blockaddress(@test1, %bb),
+              ptr blockaddress(@test1, %bb6)
+  indirectbr ptr %1, [label %bb, label %bb6]
 
 ; STATIC:     PseudoIndirectBranch
 ; STATIC-MM:  PseudoIndirectBranch

diff  --git a/llvm/test/CodeGen/Mips/brind.ll b/llvm/test/CodeGen/Mips/brind.ll
index ed2c3b3dddb74..8f2954fd2b25e 100644
--- a/llvm/test/CodeGen/Mips/brind.ll
+++ b/llvm/test/CodeGen/Mips/brind.ll
@@ -1,6 +1,6 @@
 ; RUN: llc  -march=mipsel -mattr=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
 
-@main.L = internal unnamed_addr constant [5 x i8*] [i8* blockaddress(@main, %L1), i8* blockaddress(@main, %L2), i8* blockaddress(@main, %L3), i8* blockaddress(@main, %L4), i8* null], align 4
+@main.L = internal unnamed_addr constant [5 x ptr] [ptr blockaddress(@main, %L1), ptr blockaddress(@main, %L2), ptr blockaddress(@main, %L3), ptr blockaddress(@main, %L4), ptr null], align 4
 @str = private unnamed_addr constant [2 x i8] c"A\00"
 @str5 = private unnamed_addr constant [2 x i8] c"B\00"
 @str6 = private unnamed_addr constant [2 x i8] c"C\00"
@@ -9,32 +9,32 @@
 
 define i32 @main() nounwind {
 entry:
-  %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str, i32 0, i32 0))
+  %puts = tail call i32 @puts(ptr @str)
   br label %L1
 
 L1:                                               ; preds = %entry, %L3
   %i.0 = phi i32 [ 0, %entry ], [ %inc, %L3 ]
-  %puts5 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str5, i32 0, i32 0))
+  %puts5 = tail call i32 @puts(ptr @str5)
   br label %L2
 
 L2:                                               ; preds = %L1, %L3
   %i.1 = phi i32 [ %i.0, %L1 ], [ %inc, %L3 ]
-  %puts6 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str6, i32 0, i32 0))
+  %puts6 = tail call i32 @puts(ptr @str6)
   br label %L3
 
 L3:                                               ; preds = %L2, %L3
   %i.2 = phi i32 [ %i.1, %L2 ], [ %inc, %L3 ]
-  %puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str7, i32 0, i32 0))
+  %puts7 = tail call i32 @puts(ptr @str7)
   %inc = add i32 %i.2, 1
-  %arrayidx = getelementptr inbounds [5 x i8*], [5 x i8*]* @main.L, i32 0, i32 %i.2
-  %0 = load i8*, i8** %arrayidx, align 4
-  indirectbr i8* %0, [label %L1, label %L2, label %L3, label %L4]
+  %arrayidx = getelementptr inbounds [5 x ptr], ptr @main.L, i32 0, i32 %i.2
+  %0 = load ptr, ptr %arrayidx, align 4
+  indirectbr ptr %0, [label %L1, label %L2, label %L3, label %L4]
 ; 16: 	jrc	 ${{[0-9]+}}
 L4:                                               ; preds = %L3
-  %puts8 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str8, i32 0, i32 0))
+  %puts8 = tail call i32 @puts(ptr @str8)
   ret i32 0
 }
 
-declare i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(ptr nocapture) nounwind
 
 

diff  --git a/llvm/test/CodeGen/Mips/brundef.ll b/llvm/test/CodeGen/Mips/brundef.ll
index 802556c7cabd1..d63b587af0395 100644
--- a/llvm/test/CodeGen/Mips/brundef.ll
+++ b/llvm/test/CodeGen/Mips/brundef.ll
@@ -5,7 +5,7 @@
 define void @ham() {
 bb:
   %tmp = alloca i32, align 4
-  %tmp13 = ptrtoint i32* %tmp to i32
+  %tmp13 = ptrtoint ptr %tmp to i32
   %tmp70 = icmp eq i32 undef, -1
   br i1 %tmp70, label %bb72, label %bb40
 
@@ -14,13 +14,13 @@ bb72:                                             ; preds = %bb72, %bb
 
 bb40:                                             ; preds = %bb72, %bb
   %tmp41 = phi i32 [ %tmp13, %bb72 ], [ %tmp13, %bb ]
-  %tmp55 = inttoptr i32 %tmp41 to i32*
-  %tmp58 = insertelement <2 x i32*> undef, i32* %tmp55, i32 1
+  %tmp55 = inttoptr i32 %tmp41 to ptr
+  %tmp58 = insertelement <2 x ptr> undef, ptr %tmp55, i32 1
   br label %bb59
 
 bb59:                                             ; preds = %bb59, %bb40
-  %tmp60 = phi <2 x i32*> [ %tmp61, %bb59 ], [ %tmp58, %bb40 ]
-  %tmp61 = getelementptr i32, <2 x i32*> %tmp60, <2 x i32> <i32 -1, i32 1>
-  %tmp62 = extractelement <2 x i32*> %tmp61, i32 1
+  %tmp60 = phi <2 x ptr> [ %tmp61, %bb59 ], [ %tmp58, %bb40 ]
+  %tmp61 = getelementptr i32, <2 x ptr> %tmp60, <2 x i32> <i32 -1, i32 1>
+  %tmp62 = extractelement <2 x ptr> %tmp61, i32 1
   br label %bb59
 }

diff  --git a/llvm/test/CodeGen/Mips/buildpairextractelementf64.ll b/llvm/test/CodeGen/Mips/buildpairextractelementf64.ll
index 19ef04f040d89..95b9ca8caf029 100644
--- a/llvm/test/CodeGen/Mips/buildpairextractelementf64.ll
+++ b/llvm/test/CodeGen/Mips/buildpairextractelementf64.ll
@@ -17,7 +17,7 @@
 
 define double @f(i32 %a1, double %d) nounwind {
 entry:
-  store i32 %a1, i32* @a, align 4
+  store i32 %a1, ptr @a, align 4
   %add = fadd double %d, 2.000000e+00
   ret double %add
 }

diff  --git a/llvm/test/CodeGen/Mips/cache-intrinsic.ll b/llvm/test/CodeGen/Mips/cache-intrinsic.ll
index 987032eaeb89f..3886d579215af 100644
--- a/llvm/test/CodeGen/Mips/cache-intrinsic.ll
+++ b/llvm/test/CodeGen/Mips/cache-intrinsic.ll
@@ -9,18 +9,18 @@ target triple = "mips--linux-gnu"
 define i32 @main() {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0))
-  %call1 = call i8* @strcpy(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds ([25 x i8], [25 x i8]* @.str1, i32 0, i32 0)) #3
-  call void @llvm.clear_cache(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0), i8* getelementptr inbounds (i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0), i32 32)) #3
-  %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* getelementptr inbounds ([32 x i8], [32 x i8]* @buffer, i32 0, i32 0))
+  store i32 0, ptr %retval
+  %call = call i32 (ptr, ...) @printf(ptr @.str, ptr @buffer)
+  %call1 = call ptr @strcpy(ptr @buffer, ptr @.str1) #3
+  call void @llvm.clear_cache(ptr @buffer, ptr getelementptr inbounds (i8, ptr @buffer, i32 32)) #3
+  %call2 = call i32 (ptr, ...) @printf(ptr @.str, ptr @buffer)
   ret i32 0
 }
 
 ; CHECK: __clear_cache
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 
-declare i8* @strcpy(i8*, i8*)
+declare ptr @strcpy(ptr, ptr)
 
-declare void @llvm.clear_cache(i8*, i8*)
+declare void @llvm.clear_cache(ptr, ptr)

diff  --git a/llvm/test/CodeGen/Mips/call-optimization.ll b/llvm/test/CodeGen/Mips/call-optimization.ll
index 0be5ff2ef6e32..89fb970ed8178 100644
--- a/llvm/test/CodeGen/Mips/call-optimization.ll
+++ b/llvm/test/CodeGen/Mips/call-optimization.ll
@@ -82,9 +82,9 @@ define void @caller4(double %d) {
 entry:
   %call = tail call double @ceil(double %d)
   %call1 = tail call double @ceil(double %call)
-  store double %call1, double* @gd2, align 8
+  store double %call1, ptr @gd2, align 8
   %call2 = tail call double @ceil(double %call1)
-  store double %call2, double* @gd1, align 8
+  store double %call2, ptr @gd1, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/cconv/arguments-float.ll b/llvm/test/CodeGen/Mips/cconv/arguments-float.ll
index ab25704cefc11..3a39a29de02fe 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-float.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-float.ll
@@ -24,24 +24,24 @@
 define void @double_args(double %a, double %b, double %c, double %d, double %e,
                          double %f, double %g, double %h, double %i) nounwind {
 entry:
-        %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
-        store volatile double %a, double* %0
-        %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
-        store volatile double %b, double* %1
-        %2 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 3
-        store volatile double %c, double* %2
-        %3 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 4
-        store volatile double %d, double* %3
-        %4 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 5
-        store volatile double %e, double* %4
-        %5 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 6
-        store volatile double %f, double* %5
-        %6 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 7
-        store volatile double %g, double* %6
-        %7 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 8
-        store volatile double %h, double* %7
-        %8 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 9
-        store volatile double %i, double* %8
+        %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+        store volatile double %a, ptr %0
+        %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2
+        store volatile double %b, ptr %1
+        %2 = getelementptr [11 x double], ptr @doubles, i32 0, i32 3
+        store volatile double %c, ptr %2
+        %3 = getelementptr [11 x double], ptr @doubles, i32 0, i32 4
+        store volatile double %d, ptr %3
+        %4 = getelementptr [11 x double], ptr @doubles, i32 0, i32 5
+        store volatile double %e, ptr %4
+        %5 = getelementptr [11 x double], ptr @doubles, i32 0, i32 6
+        store volatile double %f, ptr %5
+        %6 = getelementptr [11 x double], ptr @doubles, i32 0, i32 7
+        store volatile double %g, ptr %6
+        %7 = getelementptr [11 x double], ptr @doubles, i32 0, i32 8
+        store volatile double %h, ptr %7
+        %8 = getelementptr [11 x double], ptr @doubles, i32 0, i32 9
+        store volatile double %i, ptr %8
         ret void
 }
 
@@ -105,26 +105,26 @@ define void @float_args(float %a, float %b, float %c, float %d, float %e,
                         float %f, float %g, float %h, float %i, float %j)
                        nounwind {
 entry:
-        %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
-        store volatile float %a, float* %0
-        %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
-        store volatile float %b, float* %1
-        %2 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 3
-        store volatile float %c, float* %2
-        %3 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 4
-        store volatile float %d, float* %3
-        %4 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 5
-        store volatile float %e, float* %4
-        %5 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 6
-        store volatile float %f, float* %5
-        %6 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 7
-        store volatile float %g, float* %6
-        %7 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 8
-        store volatile float %h, float* %7
-        %8 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 9
-        store volatile float %i, float* %8
-        %9 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 10
-        store volatile float %j, float* %9
+        %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+        store volatile float %a, ptr %0
+        %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2
+        store volatile float %b, ptr %1
+        %2 = getelementptr [11 x float], ptr @floats, i32 0, i32 3
+        store volatile float %c, ptr %2
+        %3 = getelementptr [11 x float], ptr @floats, i32 0, i32 4
+        store volatile float %d, ptr %3
+        %4 = getelementptr [11 x float], ptr @floats, i32 0, i32 5
+        store volatile float %e, ptr %4
+        %5 = getelementptr [11 x float], ptr @floats, i32 0, i32 6
+        store volatile float %f, ptr %5
+        %6 = getelementptr [11 x float], ptr @floats, i32 0, i32 7
+        store volatile float %g, ptr %6
+        %7 = getelementptr [11 x float], ptr @floats, i32 0, i32 8
+        store volatile float %h, ptr %7
+        %8 = getelementptr [11 x float], ptr @floats, i32 0, i32 9
+        store volatile float %i, ptr %8
+        %9 = getelementptr [11 x float], ptr @floats, i32 0, i32 10
+        store volatile float %j, ptr %9
         ret void
 }
 
@@ -170,10 +170,10 @@ entry:
 
 define void @double_arg2(i8 %a, double %b) nounwind {
 entry:
-        %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
-        store volatile i8 %a, i8* %0
-        %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
-        store volatile double %b, double* %1
+        %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+        store volatile i8 %a, ptr %0
+        %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+        store volatile double %b, ptr %1
         ret void
 }
 
@@ -197,10 +197,10 @@ entry:
 
 define void @float_arg2(i8 signext %a, float %b) nounwind {
 entry:
-        %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
-        store volatile i8 %a, i8* %0
-        %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
-        store volatile float %b, float* %1
+        %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+        store volatile i8 %a, ptr %0
+        %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+        store volatile float %b, ptr %1
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/cconv/arguments-fp128.ll b/llvm/test/CodeGen/Mips/cconv/arguments-fp128.ll
index 024d7c806ac21..7efae8e2a43f1 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-fp128.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-fp128.ll
@@ -13,16 +13,16 @@
 
 define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
 entry:
-        %0 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 1
-        store volatile fp128 %a, fp128* %0
-        %1 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 2
-        store volatile fp128 %b, fp128* %1
-        %2 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 3
-        store volatile fp128 %c, fp128* %2
-        %3 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 4
-        store volatile fp128 %d, fp128* %3
-        %4 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 5
-        store volatile fp128 %e, fp128* %4
+        %0 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 1
+        store volatile fp128 %a, ptr %0
+        %1 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 2
+        store volatile fp128 %b, ptr %1
+        %2 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 3
+        store volatile fp128 %c, ptr %2
+        %3 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 4
+        store volatile fp128 %d, ptr %3
+        %4 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 5
+        store volatile fp128 %e, ptr %4
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll b/llvm/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
index cd4d0deb917c3..eaa286414c11d 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-hard-float-varargs.ll
@@ -33,16 +33,15 @@
 define void @double_args(double %a, ...)
                          nounwind {
 entry:
-        %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
-        store volatile double %a, double* %0
-
-        %ap = alloca i8*
-        %ap2 = bitcast i8** %ap to i8*
-        call void @llvm.va_start(i8* %ap2)
-        %b = va_arg i8** %ap, double
-        %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
-        store volatile double %b, double* %1
-        call void @llvm.va_end(i8* %ap2)
+        %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+        store volatile double %a, ptr %0
+
+        %ap = alloca ptr
+        call void @llvm.va_start(ptr %ap)
+        %b = va_arg ptr %ap, double
+        %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2
+        store volatile double %b, ptr %1
+        call void @llvm.va_end(ptr %ap)
         ret void
 }
 
@@ -95,16 +94,15 @@ entry:
 
 define void @float_args(float %a, ...) nounwind {
 entry:
-        %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
-        store volatile float %a, float* %0
-
-        %ap = alloca i8*
-        %ap2 = bitcast i8** %ap to i8*
-        call void @llvm.va_start(i8* %ap2)
-        %b = va_arg i8** %ap, float
-        %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
-        store volatile float %b, float* %1
-        call void @llvm.va_end(i8* %ap2)
+        %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+        store volatile float %a, ptr %0
+
+        %ap = alloca ptr
+        call void @llvm.va_start(ptr %ap)
+        %b = va_arg ptr %ap, float
+        %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2
+        store volatile float %b, ptr %1
+        call void @llvm.va_end(ptr %ap)
         ret void
 }
 
@@ -159,6 +157,6 @@ entry:
 ; NEWBE-DAG:         lwc1 [[FTMP1:\$f[0-9]+]], 12($sp)
 ; ALL-DAG:           swc1 [[FTMP1]], 8([[R2]])
 
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_copy(i8*, i8*)
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_copy(ptr, ptr)
+declare void @llvm.va_end(ptr)

diff  --git a/llvm/test/CodeGen/Mips/cconv/arguments-hard-float.ll b/llvm/test/CodeGen/Mips/cconv/arguments-hard-float.ll
index 3e2b6cb46e24a..4edc463718188 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-hard-float.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-hard-float.ll
@@ -24,24 +24,24 @@
 define void @double_args(double %a, double %b, double %c, double %d, double %e,
                          double %f, double %g, double %h, double %i) nounwind {
 entry:
-        %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
-        store volatile double %a, double* %0
-        %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
-        store volatile double %b, double* %1
-        %2 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 3
-        store volatile double %c, double* %2
-        %3 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 4
-        store volatile double %d, double* %3
-        %4 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 5
-        store volatile double %e, double* %4
-        %5 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 6
-        store volatile double %f, double* %5
-        %6 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 7
-        store volatile double %g, double* %6
-        %7 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 8
-        store volatile double %h, double* %7
-        %8 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 9
-        store volatile double %i, double* %8
+        %0 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+        store volatile double %a, ptr %0
+        %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 2
+        store volatile double %b, ptr %1
+        %2 = getelementptr [11 x double], ptr @doubles, i32 0, i32 3
+        store volatile double %c, ptr %2
+        %3 = getelementptr [11 x double], ptr @doubles, i32 0, i32 4
+        store volatile double %d, ptr %3
+        %4 = getelementptr [11 x double], ptr @doubles, i32 0, i32 5
+        store volatile double %e, ptr %4
+        %5 = getelementptr [11 x double], ptr @doubles, i32 0, i32 6
+        store volatile double %f, ptr %5
+        %6 = getelementptr [11 x double], ptr @doubles, i32 0, i32 7
+        store volatile double %g, ptr %6
+        %7 = getelementptr [11 x double], ptr @doubles, i32 0, i32 8
+        store volatile double %h, ptr %7
+        %8 = getelementptr [11 x double], ptr @doubles, i32 0, i32 9
+        store volatile double %i, ptr %8
         ret void
 }
 
@@ -87,24 +87,24 @@ entry:
 define void @float_args(float %a, float %b, float %c, float %d, float %e,
                         float %f, float %g, float %h, float %i) nounwind {
 entry:
-        %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
-        store volatile float %a, float* %0
-        %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
-        store volatile float %b, float* %1
-        %2 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 3
-        store volatile float %c, float* %2
-        %3 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 4
-        store volatile float %d, float* %3
-        %4 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 5
-        store volatile float %e, float* %4
-        %5 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 6
-        store volatile float %f, float* %5
-        %6 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 7
-        store volatile float %g, float* %6
-        %7 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 8
-        store volatile float %h, float* %7
-        %8 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 9
-        store volatile float %i, float* %8
+        %0 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+        store volatile float %a, ptr %0
+        %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 2
+        store volatile float %b, ptr %1
+        %2 = getelementptr [11 x float], ptr @floats, i32 0, i32 3
+        store volatile float %c, ptr %2
+        %3 = getelementptr [11 x float], ptr @floats, i32 0, i32 4
+        store volatile float %d, ptr %3
+        %4 = getelementptr [11 x float], ptr @floats, i32 0, i32 5
+        store volatile float %e, ptr %4
+        %5 = getelementptr [11 x float], ptr @floats, i32 0, i32 6
+        store volatile float %f, ptr %5
+        %6 = getelementptr [11 x float], ptr @floats, i32 0, i32 7
+        store volatile float %g, ptr %6
+        %7 = getelementptr [11 x float], ptr @floats, i32 0, i32 8
+        store volatile float %h, ptr %7
+        %8 = getelementptr [11 x float], ptr @floats, i32 0, i32 9
+        store volatile float %i, ptr %8
         ret void
 }
 
@@ -155,10 +155,10 @@ entry:
 
 define void @double_arg2(i8 %a, double %b) nounwind {
 entry:
-        %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
-        store volatile i8 %a, i8* %0
-        %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
-        store volatile double %b, double* %1
+        %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+        store volatile i8 %a, ptr %0
+        %1 = getelementptr [11 x double], ptr @doubles, i32 0, i32 1
+        store volatile double %b, ptr %1
         ret void
 }
 
@@ -186,10 +186,10 @@ entry:
 
 define void @float_arg2(i8 %a, float %b) nounwind {
 entry:
-        %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
-        store volatile i8 %a, i8* %0
-        %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
-        store volatile float %b, float* %1
+        %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+        store volatile i8 %a, ptr %0
+        %1 = getelementptr [11 x float], ptr @floats, i32 0, i32 1
+        store volatile float %b, ptr %1
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll b/llvm/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
index 10ad9b0ca8c4a..d65cc754efdaf 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-hard-fp128.ll
@@ -13,16 +13,16 @@
 
 define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
 entry:
-        %0 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 1
-        store volatile fp128 %a, fp128* %0
-        %1 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 2
-        store volatile fp128 %b, fp128* %1
-        %2 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 3
-        store volatile fp128 %c, fp128* %2
-        %3 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 4
-        store volatile fp128 %d, fp128* %3
-        %4 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 5
-        store volatile fp128 %e, fp128* %4
+        %0 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 1
+        store volatile fp128 %a, ptr %0
+        %1 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 2
+        store volatile fp128 %b, ptr %1
+        %2 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 3
+        store volatile fp128 %c, ptr %2
+        %3 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 4
+        store volatile fp128 %d, ptr %3
+        %4 = getelementptr [11 x fp128], ptr @ldoubles, i32 0, i32 5
+        store volatile fp128 %e, ptr %4
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll b/llvm/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll
index 33d1a4fe1b702..f2355049adfaa 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-small-structures-bigger-than-32bits.ll
@@ -43,18 +43,15 @@
 declare void @fS1(i48 inreg) #1
 declare void @fS2(i40 inreg) #1
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #2
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #2
 
 define void @f1() #0 {
 entry:
   %s1_1 = alloca %struct.S1, align 2
   %s1_1.coerce = alloca { i48 }
-  %0 = bitcast { i48 }* %s1_1.coerce to i8*
-  %1 = bitcast %struct.S1* %s1_1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 6, i1 false)
-  %2 = getelementptr { i48 }, { i48 }* %s1_1.coerce, i32 0, i32 0
-  %3 = load i48, i48* %2, align 1
-  call void @fS1(i48 inreg %3)
+  call void @llvm.memcpy.p0.p0.i64(ptr %s1_1.coerce, ptr %s1_1, i64 6, i1 false)
+  %0 = load i48, ptr %s1_1.coerce, align 1
+  call void @fS1(i48 inreg %0)
   ret void
  ; ALL-LABEL: f1:
 
@@ -66,12 +63,9 @@ define void @f2() #0 {
 entry:
   %s2_1 = alloca %struct.S2, align 1
   %s2_1.coerce = alloca { i40 }
-  %0 = bitcast { i40 }* %s2_1.coerce to i8*
-  %1 = bitcast %struct.S2* %s2_1 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 5, i1 false)
-  %2 = getelementptr { i40 }, { i40 }* %s2_1.coerce, i32 0, i32 0
-  %3 = load i40, i40* %2, align 1
-  call void @fS2(i40 inreg %3)
+  call void @llvm.memcpy.p0.p0.i64(ptr %s2_1.coerce, ptr %s2_1, i64 5, i1 false)
+  %0 = load i40, ptr %s2_1.coerce, align 1
+  call void @fS2(i40 inreg %0)
   ret void
  ; ALL-LABEL: f2:
 

diff  --git a/llvm/test/CodeGen/Mips/cconv/arguments-struct.ll b/llvm/test/CodeGen/Mips/cconv/arguments-struct.ll
index 6288b5d52fd97..371459f5f85b0 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-struct.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-struct.ll
@@ -19,7 +19,7 @@
 
 define void @s_i8(i8 inreg %a) nounwind {
 entry:
-	store i8 %a, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @bytes, i32 0, i32 1)
+	store i8 %a, ptr getelementptr inbounds ([2 x i8], ptr @bytes, i32 0, i32 1)
         ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
index 5009c9efb4362..0619ebe48cfe6 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
@@ -134,129 +134,109 @@
 
 @.str = private unnamed_addr constant [3 x i8] c"01\00", align 1
 
-declare void @varArgF_SmallStruct(i8* %c, ...) 
+declare void @varArgF_SmallStruct(ptr %c, ...) 
 
-define void @smallStruct_1b(%struct.SmallStruct_1b* %ss) #0 {
+define void @smallStruct_1b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_1b*, align 8
-  store %struct.SmallStruct_1b* %ss, %struct.SmallStruct_1b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss.addr, align 8
-  %1 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
-  %2 = getelementptr { i8 }, { i8 }* %1, i32 0, i32 0
-  %3 = load i8, i8* %2, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i8 inreg %3)
+  %ss.addr = alloca ptr, align 8
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  %1 = load i8, ptr %0, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i8 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_1b: 
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56
 }
 
-define void @smallStruct_2b(%struct.SmallStruct_2b* %ss) #0 {
+define void @smallStruct_2b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_2b*, align 8
-  store %struct.SmallStruct_2b* %ss, %struct.SmallStruct_2b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_2b*, %struct.SmallStruct_2b** %ss.addr, align 8
-  %1 = bitcast %struct.SmallStruct_2b* %0 to { i16 }*
-  %2 = getelementptr { i16 }, { i16 }* %1, i32 0, i32 0
-  %3 = load i16, i16* %2, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i16 inreg %3)
+  %ss.addr = alloca ptr, align 8
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  %1 = load i16, ptr %0, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i16 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_2b:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 48
 }
 
-define void @smallStruct_3b(%struct.SmallStruct_3b* %ss) #0 {
+define void @smallStruct_3b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_3b*, align 8
+  %ss.addr = alloca ptr, align 8
   %.coerce = alloca { i24 }
-  store %struct.SmallStruct_3b* %ss, %struct.SmallStruct_3b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_3b*, %struct.SmallStruct_3b** %ss.addr, align 8
-  %1 = bitcast { i24 }* %.coerce to i8*
-  %2 = bitcast %struct.SmallStruct_3b* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 3, i1 false)
-  %3 = getelementptr { i24 }, { i24 }* %.coerce, i32 0, i32 0
-  %4 = load i24, i24* %3, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i24 inreg %4)
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 3, i1 false)
+  %1 = load i24, ptr %.coerce, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i24 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_3b:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 40
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
 
-define void @smallStruct_4b(%struct.SmallStruct_4b* %ss) #0 {
+define void @smallStruct_4b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_4b*, align 8
-  store %struct.SmallStruct_4b* %ss, %struct.SmallStruct_4b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_4b*, %struct.SmallStruct_4b** %ss.addr, align 8
-  %1 = bitcast %struct.SmallStruct_4b* %0 to { i32 }*
-  %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
-  %3 = load i32, i32* %2, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
+  %ss.addr = alloca ptr, align 8
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  %1 = load i32, ptr %0, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i32 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_4b:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 32
 }
 
-define void @smallStruct_5b(%struct.SmallStruct_5b* %ss) #0 {
+define void @smallStruct_5b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_5b*, align 8
+  %ss.addr = alloca ptr, align 8
   %.coerce = alloca { i40 }
-  store %struct.SmallStruct_5b* %ss, %struct.SmallStruct_5b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_5b*, %struct.SmallStruct_5b** %ss.addr, align 8
-  %1 = bitcast { i40 }* %.coerce to i8*
-  %2 = bitcast %struct.SmallStruct_5b* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 5, i1 false)
-  %3 = getelementptr { i40 }, { i40 }* %.coerce, i32 0, i32 0
-  %4 = load i40, i40* %3, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i40 inreg %4)
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 5, i1 false)
+  %1 = load i40, ptr %.coerce, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i40 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_5b:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 24
 }
 
-define void @smallStruct_6b(%struct.SmallStruct_6b* %ss) #0 {
+define void @smallStruct_6b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_6b*, align 8
+  %ss.addr = alloca ptr, align 8
   %.coerce = alloca { i48 }
-  store %struct.SmallStruct_6b* %ss, %struct.SmallStruct_6b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_6b*, %struct.SmallStruct_6b** %ss.addr, align 8
-  %1 = bitcast { i48 }* %.coerce to i8*
-  %2 = bitcast %struct.SmallStruct_6b* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i1 false)
-  %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
-  %4 = load i48, i48* %3, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 6, i1 false)
+  %1 = load i48, ptr %.coerce, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i48 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_6b:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
 }
 
-define void @smallStruct_7b(%struct.SmallStruct_7b* %ss) #0 {
+define void @smallStruct_7b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_7b*, align 8
+  %ss.addr = alloca ptr, align 8
   %.coerce = alloca { i56 }
-  store %struct.SmallStruct_7b* %ss, %struct.SmallStruct_7b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_7b*, %struct.SmallStruct_7b** %ss.addr, align 8
-  %1 = bitcast { i56 }* %.coerce to i8*
-  %2 = bitcast %struct.SmallStruct_7b* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 7, i1 false)
-  %3 = getelementptr { i56 }, { i56 }* %.coerce, i32 0, i32 0
-  %4 = load i56, i56* %3, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i56 inreg %4)
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 7, i1 false)
+  %1 = load i56, ptr %.coerce, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i56 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_7b:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 8
 }
 
-define void @smallStruct_8b(%struct.SmallStruct_8b* %ss) #0 {
+define void @smallStruct_8b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_8b*, align 8
-  store %struct.SmallStruct_8b* %ss, %struct.SmallStruct_8b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_8b*, %struct.SmallStruct_8b** %ss.addr, align 8
-  %1 = bitcast %struct.SmallStruct_8b* %0 to { i64 }*
-  %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
-  %3 = load i64, i64* %2, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+  %ss.addr = alloca ptr, align 8
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  %1 = load i64, ptr %0, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i64 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_8b:
  ; Check that the structure is not shifted before the pointer to str is loaded.
@@ -264,20 +244,18 @@ entry:
  ; CHECK: lui
 }
 
-define void @smallStruct_9b(%struct.SmallStruct_9b* %ss) #0 {
+define void @smallStruct_9b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_9b*, align 8
+  %ss.addr = alloca ptr, align 8
   %.coerce = alloca { i64, i8 }
-  store %struct.SmallStruct_9b* %ss, %struct.SmallStruct_9b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_9b*, %struct.SmallStruct_9b** %ss.addr, align 8
-  %1 = bitcast { i64, i8 }* %.coerce to i8*
-  %2 = bitcast %struct.SmallStruct_9b* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 9, i1 false)
-  %3 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 0
-  %4 = load i64, i64* %3, align 1
-  %5 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 1
-  %6 = load i8, i8* %5, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %4, i8 inreg %6)
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 9, i1 false)
+  %1 = getelementptr { i64, i8 }, ptr %.coerce, i32 0, i32 0
+  %2 = load i64, ptr %1, align 1
+  %3 = getelementptr { i64, i8 }, ptr %.coerce, i32 0, i32 1
+  %4 = load i8, ptr %3, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i64 inreg %2, i8 inreg %4)
   ret void
  ; CHECK-LABEL: smallStruct_9b:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56

diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
index d3c8f280c59dc..db066c0475142 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
@@ -68,83 +68,71 @@
 
 @.str = private unnamed_addr constant [3 x i8] c"01\00", align 1
 
-declare void @varArgF_SmallStruct(i8* %c, ...) 
+declare void @varArgF_SmallStruct(ptr %c, ...) 
 
-define void @smallStruct_1b1s(%struct.SmallStruct_1b1s* %ss) #0 {
+define void @smallStruct_1b1s(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_1b1s*, align 8
-  store %struct.SmallStruct_1b1s* %ss, %struct.SmallStruct_1b1s** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b1s*, %struct.SmallStruct_1b1s** %ss.addr, align 8
-  %1 = bitcast %struct.SmallStruct_1b1s* %0 to { i32 }*
-  %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
-  %3 = load i32, i32* %2, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
+  %ss.addr = alloca ptr, align 8
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  %1 = load i32, ptr %0, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i32 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_1b1s:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 32
 }
 
-define void @smallStruct_1b1i(%struct.SmallStruct_1b1i* %ss) #0 {
+define void @smallStruct_1b1i(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_1b1i*, align 8
-  store %struct.SmallStruct_1b1i* %ss, %struct.SmallStruct_1b1i** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b1i*, %struct.SmallStruct_1b1i** %ss.addr, align 8
-  %1 = bitcast %struct.SmallStruct_1b1i* %0 to { i64 }*
-  %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
-  %3 = load i64, i64* %2, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+  %ss.addr = alloca ptr, align 8
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  %1 = load i64, ptr %0, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i64 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_1b1i:
  ; CHECK-NOT: dsll
  ; CHECK: lui
 }
 
-define void @smallStruct_1b1s1b(%struct.SmallStruct_1b1s1b* %ss) #0 {
+define void @smallStruct_1b1s1b(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_1b1s1b*, align 8
+  %ss.addr = alloca ptr, align 8
   %.coerce = alloca { i48 }
-  store %struct.SmallStruct_1b1s1b* %ss, %struct.SmallStruct_1b1s1b** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1b1s1b*, %struct.SmallStruct_1b1s1b** %ss.addr, align 8
-  %1 = bitcast { i48 }* %.coerce to i8*
-  %2 = bitcast %struct.SmallStruct_1b1s1b* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i1 false)
-  %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
-  %4 = load i48, i48* %3, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 6, i1 false)
+  %1 = load i48, ptr %.coerce, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i48 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_1b1s1b:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
 
-define void @smallStruct_1s1i(%struct.SmallStruct_1s1i* %ss) #0 {
+define void @smallStruct_1s1i(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_1s1i*, align 8
-  store %struct.SmallStruct_1s1i* %ss, %struct.SmallStruct_1s1i** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_1s1i*, %struct.SmallStruct_1s1i** %ss.addr, align 8
-  %1 = bitcast %struct.SmallStruct_1s1i* %0 to { i64 }*
-  %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
-  %3 = load i64, i64* %2, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
+  %ss.addr = alloca ptr, align 8
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  %1 = load i64, ptr %0, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i64 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_1s1i:
  ; CHECK-NOT: dsll
  ; CHECK: lui
 }
 
-define void @smallStruct_3b1s(%struct.SmallStruct_3b1s* %ss) #0 {
+define void @smallStruct_3b1s(ptr %ss) #0 {
 entry:
-  %ss.addr = alloca %struct.SmallStruct_3b1s*, align 8
+  %ss.addr = alloca ptr, align 8
   %.coerce = alloca { i48 }
-  store %struct.SmallStruct_3b1s* %ss, %struct.SmallStruct_3b1s** %ss.addr, align 8
-  %0 = load %struct.SmallStruct_3b1s*, %struct.SmallStruct_3b1s** %ss.addr, align 8
-  %1 = bitcast { i48 }* %.coerce to i8*
-  %2 = bitcast %struct.SmallStruct_3b1s* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i1 false)
-  %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
-  %4 = load i48, i48* %3, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
+  store ptr %ss, ptr %ss.addr, align 8
+  %0 = load ptr, ptr %ss.addr, align 8
+  call void @llvm.memcpy.p0.p0.i64(ptr %.coerce, ptr %0, i64 6, i1 false)
+  %1 = load i48, ptr %.coerce, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i48 inreg %1)
   ret void
  ; CHECK-LABEL: smallStruct_3b1s:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 16

diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
index a4ac5e7bd8a6e..3bddbf9d450ab 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
@@ -88,65 +88,47 @@
 
 @.str = private unnamed_addr constant [3 x i8] c"01\00", align 1
 
-declare void @varArgF_SmallStruct(i8* %c, ...) 
+declare void @varArgF_SmallStruct(ptr %c, ...) 
 
-define void @smallStruct_1b_x9(%struct.SmallStruct_1b* %ss1, %struct.SmallStruct_1b* %ss2, %struct.SmallStruct_1b* %ss3, %struct.SmallStruct_1b* %ss4, %struct.SmallStruct_1b* %ss5, %struct.SmallStruct_1b* %ss6, %struct.SmallStruct_1b* %ss7, %struct.SmallStruct_1b* %ss8, %struct.SmallStruct_1b* %ss9) #0 {
+define void @smallStruct_1b_x9(ptr %ss1, ptr %ss2, ptr %ss3, ptr %ss4, ptr %ss5, ptr %ss6, ptr %ss7, ptr %ss8, ptr %ss9) #0 {
 entry:
-  %ss1.addr = alloca %struct.SmallStruct_1b*, align 8
-  %ss2.addr = alloca %struct.SmallStruct_1b*, align 8
-  %ss3.addr = alloca %struct.SmallStruct_1b*, align 8
-  %ss4.addr = alloca %struct.SmallStruct_1b*, align 8
-  %ss5.addr = alloca %struct.SmallStruct_1b*, align 8
-  %ss6.addr = alloca %struct.SmallStruct_1b*, align 8
-  %ss7.addr = alloca %struct.SmallStruct_1b*, align 8
-  %ss8.addr = alloca %struct.SmallStruct_1b*, align 8
-  %ss9.addr = alloca %struct.SmallStruct_1b*, align 8
-  store %struct.SmallStruct_1b* %ss1, %struct.SmallStruct_1b** %ss1.addr, align 8
-  store %struct.SmallStruct_1b* %ss2, %struct.SmallStruct_1b** %ss2.addr, align 8
-  store %struct.SmallStruct_1b* %ss3, %struct.SmallStruct_1b** %ss3.addr, align 8
-  store %struct.SmallStruct_1b* %ss4, %struct.SmallStruct_1b** %ss4.addr, align 8
-  store %struct.SmallStruct_1b* %ss5, %struct.SmallStruct_1b** %ss5.addr, align 8
-  store %struct.SmallStruct_1b* %ss6, %struct.SmallStruct_1b** %ss6.addr, align 8
-  store %struct.SmallStruct_1b* %ss7, %struct.SmallStruct_1b** %ss7.addr, align 8
-  store %struct.SmallStruct_1b* %ss8, %struct.SmallStruct_1b** %ss8.addr, align 8
-  store %struct.SmallStruct_1b* %ss9, %struct.SmallStruct_1b** %ss9.addr, align 8
-  %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss1.addr, align 8
-  %1 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss2.addr, align 8
-  %2 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss3.addr, align 8
-  %3 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss4.addr, align 8
-  %4 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss5.addr, align 8
-  %5 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss6.addr, align 8
-  %6 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss7.addr, align 8
-  %7 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss8.addr, align 8
-  %8 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss9.addr, align 8
-  %9 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
-  %10 = getelementptr { i8 }, { i8 }* %9, i32 0, i32 0
-  %11 = load i8, i8* %10, align 1
-  %12 = bitcast %struct.SmallStruct_1b* %1 to { i8 }*
-  %13 = getelementptr { i8 }, { i8 }* %12, i32 0, i32 0
-  %14 = load i8, i8* %13, align 1
-  %15 = bitcast %struct.SmallStruct_1b* %2 to { i8 }*
-  %16 = getelementptr { i8 }, { i8 }* %15, i32 0, i32 0
-  %17 = load i8, i8* %16, align 1
-  %18 = bitcast %struct.SmallStruct_1b* %3 to { i8 }*
-  %19 = getelementptr { i8 }, { i8 }* %18, i32 0, i32 0
-  %20 = load i8, i8* %19, align 1
-  %21 = bitcast %struct.SmallStruct_1b* %4 to { i8 }*
-  %22 = getelementptr { i8 }, { i8 }* %21, i32 0, i32 0
-  %23 = load i8, i8* %22, align 1
-  %24 = bitcast %struct.SmallStruct_1b* %5 to { i8 }*
-  %25 = getelementptr { i8 }, { i8 }* %24, i32 0, i32 0
-  %26 = load i8, i8* %25, align 1
-  %27 = bitcast %struct.SmallStruct_1b* %6 to { i8 }*
-  %28 = getelementptr { i8 }, { i8 }* %27, i32 0, i32 0
-  %29 = load i8, i8* %28, align 1
-  %30 = bitcast %struct.SmallStruct_1b* %7 to { i8 }*
-  %31 = getelementptr { i8 }, { i8 }* %30, i32 0, i32 0
-  %32 = load i8, i8* %31, align 1
-  %33 = bitcast %struct.SmallStruct_1b* %8 to { i8 }*
-  %34 = getelementptr { i8 }, { i8 }* %33, i32 0, i32 0
-  %35 = load i8, i8* %34, align 1
-  call void (i8*, ...) @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0), i8 inreg %11, i8 inreg %14, i8 inreg %17, i8 inreg %20, i8 inreg %23, i8 inreg %26, i8 inreg %29, i8 inreg %32, i8 inreg %35)
+  %ss1.addr = alloca ptr, align 8
+  %ss2.addr = alloca ptr, align 8
+  %ss3.addr = alloca ptr, align 8
+  %ss4.addr = alloca ptr, align 8
+  %ss5.addr = alloca ptr, align 8
+  %ss6.addr = alloca ptr, align 8
+  %ss7.addr = alloca ptr, align 8
+  %ss8.addr = alloca ptr, align 8
+  %ss9.addr = alloca ptr, align 8
+  store ptr %ss1, ptr %ss1.addr, align 8
+  store ptr %ss2, ptr %ss2.addr, align 8
+  store ptr %ss3, ptr %ss3.addr, align 8
+  store ptr %ss4, ptr %ss4.addr, align 8
+  store ptr %ss5, ptr %ss5.addr, align 8
+  store ptr %ss6, ptr %ss6.addr, align 8
+  store ptr %ss7, ptr %ss7.addr, align 8
+  store ptr %ss8, ptr %ss8.addr, align 8
+  store ptr %ss9, ptr %ss9.addr, align 8
+  %0 = load ptr, ptr %ss1.addr, align 8
+  %1 = load ptr, ptr %ss2.addr, align 8
+  %2 = load ptr, ptr %ss3.addr, align 8
+  %3 = load ptr, ptr %ss4.addr, align 8
+  %4 = load ptr, ptr %ss5.addr, align 8
+  %5 = load ptr, ptr %ss6.addr, align 8
+  %6 = load ptr, ptr %ss7.addr, align 8
+  %7 = load ptr, ptr %ss8.addr, align 8
+  %8 = load ptr, ptr %ss9.addr, align 8
+  %9 = load i8, ptr %0, align 1
+  %10 = load i8, ptr %1, align 1
+  %11 = load i8, ptr %2, align 1
+  %12 = load i8, ptr %3, align 1
+  %13 = load i8, ptr %4, align 1
+  %14 = load i8, ptr %5, align 1
+  %15 = load i8, ptr %6, align 1
+  %16 = load i8, ptr %7, align 1
+  %17 = load i8, ptr %8, align 1
+  call void (ptr, ...) @varArgF_SmallStruct(ptr @.str, i8 inreg %9, i8 inreg %10, i8 inreg %11, i8 inreg %12, i8 inreg %13, i8 inreg %14, i8 inreg %15, i8 inreg %16, i8 inreg %17)
   ret void
  ; CHECK-LABEL: smallStruct_1b_x9:
  ; CHECK: dsll $[[R1:[0-9]+]], $[[R2:[0-9]+]], 56

diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll
index c1b9e335142e3..1f97b376c290b 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-varargs.ll
@@ -118,21 +118,20 @@ entry:
 ; Copy the arg to the global
 ; ALL-DAG:       sh [[ARG2]], 4([[GV]])
 
-  %ap = alloca i8*, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
 
   call void asm sideeffect "teqi $$zero, 1", ""()
-  %arg1 = va_arg i8** %ap, i16
-  %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
-  store volatile i16 %arg1, i16* %e1, align 2
+  %arg1 = va_arg ptr %ap, i16
+  %e1 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 1
+  store volatile i16 %arg1, ptr %e1, align 2
 
   call void asm sideeffect "teqi $$zero, 2", ""()
-  %arg2 = va_arg i8** %ap, i16
-  %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
-  store volatile i16 %arg2, i16* %e2, align 2
+  %arg2 = va_arg ptr %ap, i16
+  %e2 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 2
+  store volatile i16 %arg2, ptr %e2, align 2
 
-  call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(ptr %ap)
 
   ret void
 }
@@ -233,21 +232,20 @@ entry:
 ; Copy the arg to the global
 ; ALL-DAG:       sw [[ARG2]], 8([[GV]])
 
-  %ap = alloca i8*, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
 
   call void asm sideeffect "teqi $$zero, 1", ""()
-  %arg1 = va_arg i8** %ap, i32
-  %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
-  store volatile i32 %arg1, i32* %e1, align 4
+  %arg1 = va_arg ptr %ap, i32
+  %e1 = getelementptr [3 x i32], ptr @words, i32 0, i32 1
+  store volatile i32 %arg1, ptr %e1, align 4
 
   call void asm sideeffect "teqi $$zero, 2", ""()
-  %arg2 = va_arg i8** %ap, i32
-  %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
-  store volatile i32 %arg2, i32* %e2, align 4
+  %arg2 = va_arg ptr %ap, i32
+  %e2 = getelementptr [3 x i32], ptr @words, i32 0, i32 2
+  store volatile i32 %arg2, ptr %e2, align 4
 
-  call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(ptr %ap)
 
   ret void
 }
@@ -355,21 +353,20 @@ entry:
 ; NEW-DAG:       ld [[ARG2:\$[0-9]+]], 0([[VA2]])
 ; NEW-DAG:       sd [[ARG2]], 16([[GV]])
 
-  %ap = alloca i8*, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
 
   call void asm sideeffect "teqi $$zero, 1", ""()
-  %arg1 = va_arg i8** %ap, i64
-  %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
-  store volatile i64 %arg1, i64* %e1, align 8
+  %arg1 = va_arg ptr %ap, i64
+  %e1 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 1
+  store volatile i64 %arg1, ptr %e1, align 8
 
   call void asm sideeffect "teqi $$zero, 2", ""()
-  %arg2 = va_arg i8** %ap, i64
-  %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
-  store volatile i64 %arg2, i64* %e2, align 8
+  %arg2 = va_arg ptr %ap, i64
+  %e2 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 2
+  store volatile i64 %arg2, ptr %e2, align 8
 
-  call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(ptr %ap)
 
   ret void
 }
@@ -470,21 +467,20 @@ entry:
 ; Copy the arg to the global
 ; ALL-DAG:       sh [[ARG2]], 4([[GV]])
 
-  %ap = alloca i8*, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
 
   call void asm sideeffect "teqi $$zero, 1", ""()
-  %arg1 = va_arg i8** %ap, i16
-  %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
-  store volatile i16 %arg1, i16* %e1, align 2
+  %arg1 = va_arg ptr %ap, i16
+  %e1 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 1
+  store volatile i16 %arg1, ptr %e1, align 2
 
   call void asm sideeffect "teqi $$zero, 2", ""()
-  %arg2 = va_arg i8** %ap, i16
-  %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
-  store volatile i16 %arg2, i16* %e2, align 2
+  %arg2 = va_arg ptr %ap, i16
+  %e2 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 2
+  store volatile i16 %arg2, ptr %e2, align 2
 
-  call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(ptr %ap)
 
   ret void
 }
@@ -585,21 +581,20 @@ entry:
 ; Copy the arg to the global
 ; ALL-DAG:       sw [[ARG2]], 8([[GV]])
 
-  %ap = alloca i8*, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
 
   call void asm sideeffect "teqi $$zero, 1", ""()
-  %arg1 = va_arg i8** %ap, i32
-  %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
-  store volatile i32 %arg1, i32* %e1, align 4
+  %arg1 = va_arg ptr %ap, i32
+  %e1 = getelementptr [3 x i32], ptr @words, i32 0, i32 1
+  store volatile i32 %arg1, ptr %e1, align 4
 
   call void asm sideeffect "teqi $$zero, 2", ""()
-  %arg2 = va_arg i8** %ap, i32
-  %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
-  store volatile i32 %arg2, i32* %e2, align 4
+  %arg2 = va_arg ptr %ap, i32
+  %e2 = getelementptr [3 x i32], ptr @words, i32 0, i32 2
+  store volatile i32 %arg2, ptr %e2, align 4
 
-  call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(ptr %ap)
 
   ret void
 }
@@ -707,21 +702,20 @@ entry:
 ; NEW-DAG:       ld [[ARG2:\$[0-9]+]], 0([[VA2]])
 ; NEW-DAG:       sd [[ARG2]], 16([[GV]])
 
-  %ap = alloca i8*, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
 
   call void asm sideeffect "teqi $$zero, 1", ""()
-  %arg1 = va_arg i8** %ap, i64
-  %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
-  store volatile i64 %arg1, i64* %e1, align 8
+  %arg1 = va_arg ptr %ap, i64
+  %e1 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 1
+  store volatile i64 %arg1, ptr %e1, align 8
 
   call void asm sideeffect "teqi $$zero, 2", ""()
-  %arg2 = va_arg i8** %ap, i64
-  %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
-  store volatile i64 %arg2, i64* %e2, align 8
+  %arg2 = va_arg ptr %ap, i64
+  %e2 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 2
+  store volatile i64 %arg2, ptr %e2, align 8
 
-  call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(ptr %ap)
 
   ret void
 }
@@ -821,21 +815,20 @@ entry:
 ; Copy the arg to the global
 ; ALL-DAG:       sh [[ARG2]], 4([[GV]])
 
-  %ap = alloca i8*, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
 
   call void asm sideeffect "teqi $$zero, 1", ""()
-  %arg1 = va_arg i8** %ap, i16
-  %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
-  store volatile i16 %arg1, i16* %e1, align 2
+  %arg1 = va_arg ptr %ap, i16
+  %e1 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 1
+  store volatile i16 %arg1, ptr %e1, align 2
 
   call void asm sideeffect "teqi $$zero, 2", ""()
-  %arg2 = va_arg i8** %ap, i16
-  %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
-  store volatile i16 %arg2, i16* %e2, align 2
+  %arg2 = va_arg ptr %ap, i16
+  %e2 = getelementptr [3 x i16], ptr @hwords, i32 0, i32 2
+  store volatile i16 %arg2, ptr %e2, align 2
 
-  call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(ptr %ap)
 
   ret void
 }
@@ -935,21 +928,20 @@ entry:
 ; Copy the arg to the global
 ; ALL-DAG:       sw [[ARG2]], 8([[GV]])
 
-  %ap = alloca i8*, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
 
   call void asm sideeffect "teqi $$zero, 1", ""()
-  %arg1 = va_arg i8** %ap, i32
-  %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
-  store volatile i32 %arg1, i32* %e1, align 4
+  %arg1 = va_arg ptr %ap, i32
+  %e1 = getelementptr [3 x i32], ptr @words, i32 0, i32 1
+  store volatile i32 %arg1, ptr %e1, align 4
 
   call void asm sideeffect "teqi $$zero, 2", ""()
-  %arg2 = va_arg i8** %ap, i32
-  %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
-  store volatile i32 %arg2, i32* %e2, align 4
+  %arg2 = va_arg ptr %ap, i32
+  %e2 = getelementptr [3 x i32], ptr @words, i32 0, i32 2
+  store volatile i32 %arg2, ptr %e2, align 4
 
-  call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(ptr %ap)
 
   ret void
 }
@@ -1056,24 +1048,23 @@ entry:
 ; NEW-DAG:       ld [[ARG2:\$[0-9]+]], 0([[VA2]])
 ; NEW-DAG:       sd [[ARG2]], 16([[GV]])
 
-  %ap = alloca i8*, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap2)
+  %ap = alloca ptr, align 8
+  call void @llvm.va_start(ptr %ap)
 
   call void asm sideeffect "teqi $$zero, 1", ""()
-  %arg1 = va_arg i8** %ap, i64
-  %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
-  store volatile i64 %arg1, i64* %e1, align 8
+  %arg1 = va_arg ptr %ap, i64
+  %e1 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 1
+  store volatile i64 %arg1, ptr %e1, align 8
 
   call void asm sideeffect "teqi $$zero, 2", ""()
-  %arg2 = va_arg i8** %ap, i64
-  %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
-  store volatile i64 %arg2, i64* %e2, align 8
+  %arg2 = va_arg ptr %ap, i64
+  %e2 = getelementptr [3 x i64], ptr @dwords, i32 0, i32 2
+  store volatile i64 %arg2, ptr %e2, align 8
 
-  call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(ptr %ap)
 
   ret void
 }
 
-declare void @llvm.va_start(i8*)
-declare void @llvm.va_end(i8*)
+declare void @llvm.va_start(ptr)
+declare void @llvm.va_end(ptr)

diff --git a/llvm/test/CodeGen/Mips/cconv/arguments.ll b/llvm/test/CodeGen/Mips/cconv/arguments.ll
index ed5f203029f6e..75403ec8a4657 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments.ll
@@ -28,26 +28,26 @@ define void @align_to_arg_slots(i8 signext %a, i8 signext %b, i8 signext %c,
                                 i8 signext %g, i8 signext %h, i8 signext %i,
                                 i8 signext %j) nounwind {
 entry:
-        %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
-        store volatile i8 %a, i8* %0
-        %1 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 2
-        store volatile i8 %b, i8* %1
-        %2 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 3
-        store volatile i8 %c, i8* %2
-        %3 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 4
-        store volatile i8 %d, i8* %3
-        %4 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 5
-        store volatile i8 %e, i8* %4
-        %5 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 6
-        store volatile i8 %f, i8* %5
-        %6 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 7
-        store volatile i8 %g, i8* %6
-        %7 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 8
-        store volatile i8 %h, i8* %7
-        %8 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 9
-        store volatile i8 %i, i8* %8
-        %9 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 10
-        store volatile i8 %j, i8* %9
+        %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+        store volatile i8 %a, ptr %0
+        %1 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 2
+        store volatile i8 %b, ptr %1
+        %2 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 3
+        store volatile i8 %c, ptr %2
+        %3 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 4
+        store volatile i8 %d, ptr %3
+        %4 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 5
+        store volatile i8 %e, ptr %4
+        %5 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 6
+        store volatile i8 %f, ptr %5
+        %6 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 7
+        store volatile i8 %g, ptr %6
+        %7 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 8
+        store volatile i8 %h, ptr %7
+        %8 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 9
+        store volatile i8 %i, ptr %8
+        %9 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 10
+        store volatile i8 %j, ptr %9
         ret void
 }
 
@@ -95,24 +95,24 @@ define void @slot_skipping(i8 signext %a, i64 signext %b, i8 signext %c,
                            i8 signext %d, i8 signext %e, i8 signext %f,
                            i8 signext %g, i64 signext %i, i8 signext %j) nounwind {
 entry:
-        %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
-        store volatile i8 %a, i8* %0
-        %1 = getelementptr [11 x i64], [11 x i64]* @dwords, i32 0, i32 1
-        store volatile i64 %b, i64* %1
-        %2 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 2
-        store volatile i8 %c, i8* %2
-        %3 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 3
-        store volatile i8 %d, i8* %3
-        %4 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 4
-        store volatile i8 %e, i8* %4
-        %5 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 5
-        store volatile i8 %f, i8* %5
-        %6 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 6
-        store volatile i8 %g, i8* %6
-        %7 = getelementptr [11 x i64], [11 x i64]* @dwords, i32 0, i32 2
-        store volatile i64 %i, i64* %7
-        %8 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 7
-        store volatile i8 %j, i8* %8
+        %0 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 1
+        store volatile i8 %a, ptr %0
+        %1 = getelementptr [11 x i64], ptr @dwords, i32 0, i32 1
+        store volatile i64 %b, ptr %1
+        %2 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 2
+        store volatile i8 %c, ptr %2
+        %3 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 3
+        store volatile i8 %d, ptr %3
+        %4 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 4
+        store volatile i8 %e, ptr %4
+        %5 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 5
+        store volatile i8 %f, ptr %5
+        %6 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 6
+        store volatile i8 %g, ptr %6
+        %7 = getelementptr [11 x i64], ptr @dwords, i32 0, i32 2
+        store volatile i64 %i, ptr %7
+        %8 = getelementptr [11 x i8], ptr @bytes, i32 0, i32 7
+        store volatile i8 %j, ptr %8
         ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/cconv/byval.ll b/llvm/test/CodeGen/Mips/cconv/byval.ll
index 5d77107d5966a..18e1914eda404 100644
--- a/llvm/test/CodeGen/Mips/cconv/byval.ll
+++ b/llvm/test/CodeGen/Mips/cconv/byval.ll
@@ -33,7 +33,7 @@
 ; O32-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
 ; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i32'memcpy'
 ; O32-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
-; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<void (%struct.S1*)* @f2>
+; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<ptr @f2>
 ; O32-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
 
 ; N32-SDAG-LABEL: Initial selection DAG: %bb.0 'g:entry'
@@ -41,7 +41,7 @@
 ; N32-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
 ; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i32'memcpy'
 ; N32-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
-; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<void (%struct.S1*)* @f2>
+; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<ptr @f2>
 ; N32-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
 
 ; N64-SDAG-LABEL: Initial selection DAG: %bb.0 'g:entry'
@@ -49,7 +49,7 @@
 ; N64-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i64<{{.*}}>
 ; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i64'memcpy'
 ; N64-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<{{.*}}>
-; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i64<void (%struct.S1*)* @f2>
+; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i64<ptr @f2>
 ; N64-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<{{.*}}>
 
 define dso_local void @g() #0 {
@@ -152,11 +152,11 @@ define dso_local void @g() #0 {
 ; N64-NEXT:    daddu $sp, $sp, $1
 entry:
   %a = alloca %struct.S1, align 4
-  call void @f2(%struct.S1* byval(%struct.S1) align 4 %a)
+  call void @f2(ptr byval(%struct.S1) align 4 %a)
   ret void
 }
 
-declare dso_local void @f2(%struct.S1* byval(%struct.S1) align 4) #1
+declare dso_local void @f2(ptr byval(%struct.S1) align 4) #1
 
 ; O32-SDAG-LABEL: Initial selection DAG: %bb.0 'g2:entry'
 ; O32-SDAG: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
@@ -166,7 +166,7 @@ declare dso_local void @f2(%struct.S1* byval(%struct.S1) align 4) #1
 ; O32-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
 ; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i32'memcpy'
 ; O32-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
-; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<void (%struct.S1*)* @f2>
+; O32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<ptr @f2>
 ; O32-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
 
 ; N32-SDAG-LABEL: Initial selection DAG: %bb.0 'g2:entry'
@@ -177,7 +177,7 @@ declare dso_local void @f2(%struct.S1* byval(%struct.S1) align 4) #1
 ; N32-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i32<{{.*}}>
 ; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i32'memcpy'
 ; N32-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
-; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<void (%struct.S1*)* @f2>
+; N32-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i32<ptr @f2>
 ; N32-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i32<{{.*}}>
 
 ; N64-SDAG-LABEL: Initial selection DAG: %bb.0 'g2:entry'
@@ -188,10 +188,10 @@ declare dso_local void @f2(%struct.S1* byval(%struct.S1) align 4) #1
 ; N64-SDAG-NOT: t{{.*}}: ch,glue = callseq_start t{{.*}}, TargetConstant:i64<{{.*}}>
 ; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i64'memcpy'
 ; N64-SDAG-NOT: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<{{.*}}>
-; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i64<void (%struct.S1*)* @f2>
+; N64-SDAG: t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetGlobalAddress:i64<ptr @f2>
 ; N64-SDAG: t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<{{.*}}>
 
-define dso_local void @g2(%struct.S1* %a) {
+define dso_local void @g2(ptr %a) {
 ; O32-LABEL: g2:
 ; O32:       # %bb.0: # %entry
 ; O32-NEXT:    lui $1, 1
@@ -340,14 +340,12 @@ define dso_local void @g2(%struct.S1* %a) {
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    daddu $sp, $sp, $1
 entry:
-  %a.addr = alloca %struct.S1*, align 4
+  %a.addr = alloca ptr, align 4
   %byval-temp = alloca %struct.S1, align 4
-  store %struct.S1* %a, %struct.S1** %a.addr, align 4
-  %0 = load %struct.S1*, %struct.S1** %a.addr, align 4
-  %1 = bitcast %struct.S1* %byval-temp to i8*
-  %2 = bitcast %struct.S1* %0 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %1, i8* align 1 %2, i32 65520, i1 false)
-  call void @f2(%struct.S1* byval(%struct.S1) align 4 %byval-temp)
+  store ptr %a, ptr %a.addr, align 4
+  %0 = load ptr, ptr %a.addr, align 4
+  call void @llvm.memcpy.p0.p0.i32(ptr align 4 %byval-temp, ptr align 1 %0, i32 65520, i1 false)
+  call void @f2(ptr byval(%struct.S1) align 4 %byval-temp)
   ret void
 }
 
@@ -366,7 +364,7 @@ entry:
 ; N64-SDAG:   t{{.*}}: ch,glue = MipsISD::JmpLink t{{.*}}, TargetExternalSymbol:i64'memcpy'
 ; N64-SDAG:   t{{.*}}: ch,glue = callseq_end t{{.*}}, TargetConstant:i64<0>
 
-define dso_local i32 @g3(%struct.S1* %a, %struct.S1* %b) #0 {
+define dso_local i32 @g3(ptr %a, ptr %b) #0 {
 ; O32-LABEL: g3:
 ; O32:       # %bb.0: # %entry
 ; O32-NEXT:    addiu $sp, $sp, -32
@@ -412,16 +410,14 @@ define dso_local i32 @g3(%struct.S1* %a, %struct.S1* %b) #0 {
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %a.addr = alloca %struct.S1*, align 4
-  %b.addr = alloca %struct.S1*, align 4
-  store %struct.S1* %a, %struct.S1** %a.addr, align 4
-  store %struct.S1* %b, %struct.S1** %b.addr, align 4
-  %0 = load %struct.S1*, %struct.S1** %a.addr, align 4
-  %1 = bitcast %struct.S1* %0 to i8*
-  %2 = load %struct.S1*, %struct.S1** %b.addr, align 4
-  %3 = bitcast %struct.S1* %2 to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %1, i8* align 1 %3, i32 65520, i1 false)
+  %a.addr = alloca ptr, align 4
+  %b.addr = alloca ptr, align 4
+  store ptr %a, ptr %a.addr, align 4
+  store ptr %b, ptr %b.addr, align 4
+  %0 = load ptr, ptr %a.addr, align 4
+  %1 = load ptr, ptr %b.addr, align 4
+  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %0, ptr align 1 %1, i32 65520, i1 false)
   ret i32 4
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #2
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1) #2

diff --git a/llvm/test/CodeGen/Mips/cconv/memory-layout.ll b/llvm/test/CodeGen/Mips/cconv/memory-layout.ll
index 2c7a19764aa6f..dae4bfc226090 100644
--- a/llvm/test/CodeGen/Mips/cconv/memory-layout.ll
+++ b/llvm/test/CodeGen/Mips/cconv/memory-layout.ll
@@ -25,7 +25,7 @@
 @float = global float 1.0, align 1
 @dword = global i64 283686952306183, align 1
 @double = global double 1.0, align 1
-@pointer = global i8* @byte
+@pointer = global ptr @byte
 
 ; ALL-NOT:       .p2align
 ; ALL-LABEL: byte:
@@ -74,7 +74,7 @@
 @float_array = global [2 x float] [float 1.0, float 2.0], align 1
 @dword_array = global [2 x i64] [i64 1, i64 2], align 1
 @double_array = global [2 x double] [double 1.0, double 2.0], align 1
-@pointer_array = global [2 x i8*] [i8* @byte, i8* @byte]
+@pointer_array = global [2 x ptr] [ptr @byte, ptr @byte]
 
 ; ALL-NOT:       .p2align
 ; ALL-LABEL: byte_array:

diff --git a/llvm/test/CodeGen/Mips/cconv/return-float.ll b/llvm/test/CodeGen/Mips/cconv/return-float.ll
index dd457fc18cd8d..8b9be38179acf 100644
--- a/llvm/test/CodeGen/Mips/cconv/return-float.ll
+++ b/llvm/test/CodeGen/Mips/cconv/return-float.ll
@@ -21,7 +21,7 @@
 
 define float @retfloat() nounwind {
 entry:
-        %0 = load volatile float, float* @float
+        %0 = load volatile float, ptr @float
         ret float %0
 }
 
@@ -34,7 +34,7 @@ entry:
 
 define double @retdouble() nounwind {
 entry:
-        %0 = load volatile double, double* @double
+        %0 = load volatile double, ptr @double
         ret double %0
 }
 

diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-float.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-float.ll
index f376905d839d2..5f2aeee09f398 100644
--- a/llvm/test/CodeGen/Mips/cconv/return-hard-float.ll
+++ b/llvm/test/CodeGen/Mips/cconv/return-hard-float.ll
@@ -34,7 +34,7 @@
 
 define float @retfloat() nounwind {
 entry:
-        %0 = load volatile float, float* @float
+        %0 = load volatile float, ptr @float
         ret float %0
 }
 
@@ -47,7 +47,7 @@ entry:
 
 define double @retdouble() nounwind {
 entry:
-        %0 = load volatile double, double* @double
+        %0 = load volatile double, ptr @double
         ret double %0
 }
 
@@ -58,7 +58,7 @@ entry:
 
 define { double, double } @retComplexDouble() #0 {
   %retval = alloca { double, double }, align 8
-  %1 = load { double, double }, { double, double }* %retval
+  %1 = load { double, double }, ptr %retval
   ret { double, double } %1
 }
 

diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll
index dc46ad0e588cd..b35159e06a998 100644
--- a/llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll
+++ b/llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll
@@ -13,7 +13,7 @@
 
 define fp128 @retldouble() nounwind {
 entry:
-        %0 = load volatile fp128, fp128* @fp128
+        %0 = load volatile fp128, ptr @fp128
         ret fp128 %0
 }
 

diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
index 43b0baa70013d..2b76b9d506b5a 100644
--- a/llvm/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
+++ b/llvm/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
@@ -10,7 +10,7 @@
 
 define inreg {fp128} @ret_struct_fp128() nounwind {
 entry:
-        %0 = load volatile {fp128}, {fp128}* @struct_fp128
+        %0 = load volatile {fp128}, ptr @struct_fp128
         ret {fp128} %0
 }
 

diff --git a/llvm/test/CodeGen/Mips/cconv/return.ll b/llvm/test/CodeGen/Mips/cconv/return.ll
index c2bbe77e54bb1..f0e4545bd4559 100644
--- a/llvm/test/CodeGen/Mips/cconv/return.ll
+++ b/llvm/test/CodeGen/Mips/cconv/return.ll
@@ -24,7 +24,7 @@
 
 define i8 @reti8() nounwind {
 entry:
-        %0 = load volatile i8, i8* @byte
+        %0 = load volatile i8, ptr @byte
         ret i8 %0
 }
 
@@ -38,7 +38,7 @@ entry:
 
 define i32 @reti32() nounwind {
 entry:
-        %0 = load volatile i32, i32* @word
+        %0 = load volatile i32, ptr @word
         ret i32 %0
 }
 
@@ -52,7 +52,7 @@ entry:
 
 define i64 @reti64() nounwind {
 entry:
-        %0 = load volatile i64, i64* @dword
+        %0 = load volatile i64, ptr @dword
         ret i64 %0
 }
 

diff --git a/llvm/test/CodeGen/Mips/cconv/roundl-call.ll b/llvm/test/CodeGen/Mips/cconv/roundl-call.ll
index 0861197290ae8..242b4292e5283 100644
--- a/llvm/test/CodeGen/Mips/cconv/roundl-call.ll
+++ b/llvm/test/CodeGen/Mips/cconv/roundl-call.ll
@@ -35,7 +35,7 @@ entry:
 ; HARD-FLOAT:   sdc1    $f0, 0(${{[0-9]+}})
 
   %call = call fp128 @roundl(fp128 %value)
-  store fp128 %call, fp128* @fp128
+  store fp128 %call, ptr @fp128
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/cconv/vector.ll b/llvm/test/CodeGen/Mips/cconv/vector.ll
index 3ca0e72b84cb2..6a56c861cdd37 100644
--- a/llvm/test/CodeGen/Mips/cconv/vector.ll
+++ b/llvm/test/CodeGen/Mips/cconv/vector.ll
@@ -2165,7 +2165,7 @@ define void @float_2(<2 x float> %a, <2 x float> %b) {
 ; MIPS64R5EL-NEXT:    jr $ra
 ; MIPS64R5EL-NEXT:    nop
   %1 = fadd <2 x float> %a, %b
-  store <2 x float> %1, <2 x float> * @float_res_v2f32
+  store <2 x float> %1, ptr @float_res_v2f32
   ret void
 }
 
@@ -2325,7 +2325,7 @@ define void @float_4(<4 x float> %a, <4 x float> %b) {
 ; MIPS64R5EL-NEXT:    jr $ra
 ; MIPS64R5EL-NEXT:    nop
   %1 = fadd <4 x float> %a, %b
-  store <4 x float> %1, <4 x float> * @float_res_v4f32
+  store <4 x float> %1, ptr @float_res_v4f32
   ret void
 }
 
@@ -2438,7 +2438,7 @@ define void @double_2(<2 x double> %a, <2 x double> %b) {
 ; MIPS32R5EL-NEXT:    jr $ra
 ; MIPS32R5EL-NEXT:    nop
   %1 = fadd <2 x double> %a, %b
-  store <2 x double> %1, <2 x double> * @double_v2f64
+  store <2 x double> %1, ptr @double_v2f64
   ret void
 }
 
@@ -2497,7 +2497,7 @@ define <2 x i8> @ret_2_i8() {
 ; MIPS64R5-NEXT:    lh $2, 0($1)
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
-  %1 = load <2 x i8>, <2 x i8> * @gv2i8
+  %1 = load <2 x i8>, ptr @gv2i8
   ret <2 x i8> %1
 }
 
@@ -2535,7 +2535,7 @@ define <4 x i8> @ret_4_i8() {
 ; MIPS64R5-NEXT:    lw $2, 0($1)
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
-  %1 = load <4 x i8>, <4 x i8> * @gv4i8
+  %1 = load <4 x i8>, ptr @gv4i8
   ret <4 x i8> %1
 }
 
@@ -2624,7 +2624,7 @@ define <8 x i8> @ret_8_i8() {
 ; MIPS32R5EL-NEXT:    addiu $sp, $sp, 32
 ; MIPS32R5EL-NEXT:    jr $ra
 ; MIPS32R5EL-NEXT:    nop
-  %1 = load <8 x i8>, <8 x i8> * @gv8i8
+  %1 = load <8 x i8>, ptr @gv8i8
   ret <8 x i8> %1
 }
 
@@ -2674,7 +2674,7 @@ define <16 x i8> @ret_16_i8() {
 ; MIPS64R5-NEXT:    copy_s.d $3, $w0[1]
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
-  %1 = load <16 x i8>, <16 x i8> * @gv16i8
+  %1 = load <16 x i8>, ptr @gv16i8
   ret <16 x i8> %1
 }
 
@@ -2712,7 +2712,7 @@ define <2 x i16> @ret_2_i16() {
 ; MIPS64R5-NEXT:    lw $2, 0($1)
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
-  %1 = load <2 x i16>, <2 x i16> * @gv2i16
+  %1 = load <2 x i16>, ptr @gv2i16
   ret <2 x i16> %1
 }
 
@@ -2801,7 +2801,7 @@ define <4 x i16> @ret_4_i16() {
 ; MIPS32R5EL-NEXT:    addiu $sp, $sp, 32
 ; MIPS32R5EL-NEXT:    jr $ra
 ; MIPS32R5EL-NEXT:    nop
-  %1 = load <4 x i16>, <4 x i16> * @gv4i16
+  %1 = load <4 x i16>, ptr @gv4i16
   ret <4 x i16> %1
 }
 
@@ -2851,7 +2851,7 @@ define <8 x i16> @ret_8_i16() {
 ; MIPS64R5-NEXT:    copy_s.d $3, $w0[1]
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
-  %1 = load <8 x i16>, <8 x i16> * @gv8i16
+  %1 = load <8 x i16>, ptr @gv8i16
   ret <8 x i16> %1
 }
 
@@ -2940,7 +2940,7 @@ define <2 x i32> @ret_2_i32() {
 ; MIPS32R5EL-NEXT:    addiu $sp, $sp, 32
 ; MIPS32R5EL-NEXT:    jr $ra
 ; MIPS32R5EL-NEXT:    nop
-  %1 = load <2 x i32>, <2 x i32> * @gv2i32
+  %1 = load <2 x i32>, ptr @gv2i32
   ret <2 x i32> %1
 }
 
@@ -2990,7 +2990,7 @@ define <4 x i32> @ret_4_i32() {
 ; MIPS64R5-NEXT:    copy_s.d $3, $w0[1]
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
-  %1 = load <4 x i32>, <4 x i32> * @gv4i32
+  %1 = load <4 x i32>, ptr @gv4i32
   ret <4 x i32> %1
 }
 
@@ -3040,7 +3040,7 @@ define <2 x i64> @ret_2_i64() {
 ; MIPS64R5-NEXT:    copy_s.d $3, $w0[1]
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
-  %1 = load <2 x i64>, <2 x i64> * @gv2i64
+  %1 = load <2 x i64>, ptr @gv2i64
   ret <2 x i64> %1
 }
 
@@ -3090,7 +3090,7 @@ define <2 x float> @ret_float_2() {
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
 entry:
-  %0 = load <2 x float>, <2 x float> * @gv2f32
+  %0 = load <2 x float>, ptr @gv2f32
   ret <2 x float> %0
 }
 
@@ -3142,7 +3142,7 @@ define <4 x float> @ret_float_4() {
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
 entry:
-  %0 = load <4 x float>, <4 x float> * @gv4f32
+  %0 = load <4 x float>, ptr @gv4f32
   ret <4 x float> %0
 }
 
@@ -3192,7 +3192,7 @@ define <2 x double> @ret_double_2() {
 ; MIPS64R5-NEXT:    jr $ra
 ; MIPS64R5-NEXT:    nop
 entry:
-  %0 = load <2 x double>, <2 x double> * @gv2f64
+  %0 = load <2 x double>, ptr @gv2f64
   ret <2 x double> %0
 }
 
@@ -3372,7 +3372,7 @@ define void @call_i8_2() {
 ; MIPS64R5EL-NEXT:    nop
 entry:
   %0 = call <2 x i8> @i8_2(<2 x i8> <i8 6, i8 7>, <2 x i8> <i8 12, i8 8>)
-  store <2 x i8> %0, <2 x i8> * @gv2i8
+  store <2 x i8> %0, ptr @gv2i8
   ret void
 }
 
@@ -3554,7 +3554,7 @@ define void @call_i8_4() {
 ; MIPS64R5EL-NEXT:    nop
 entry:
   %0 = call <4 x i8> @i8_4(<4 x i8> <i8 6, i8 7, i8 9, i8 10>, <4 x i8> <i8 12, i8 8, i8 9, i8 10>)
-  store <4 x i8> %0, <4 x i8> * @gv4i8
+  store <4 x i8> %0, ptr @gv4i8
   ret void
 }
 
@@ -3776,7 +3776,7 @@ define void @call_i8_8() {
 ; MIPS64R5EL-NEXT:    nop
 entry:
   %0 = call <8 x i8> @i8_8(<8 x i8> <i8 6, i8 7, i8 9, i8 10, i8 6, i8 7, i8 9, i8 10>, <8 x i8> <i8 12, i8 8, i8 9, i8 10, i8 6, i8 7, i8 9, i8 10>)
-  store <8 x i8> %0, <8 x i8> * @gv8i8
+  store <8 x i8> %0, ptr @gv8i8
   ret void
 }
 
@@ -4007,7 +4007,7 @@ define void @calli8_16() {
 ; MIPS64EL-NEXT:    nop
 entry:
   %0 = call <16 x i8> @i8_16(<16 x i8> <i8 6, i8 7,i8 6, i8 7,i8 6, i8 7,i8 6, i8 7,i8 6, i8 7,i8 6, i8 7, i8 6, i8 7, i8 9, i8 10>, <16 x i8> <i8 7, i8 9,i8 7, i8 9,i8 7, i8 9,i8 7, i8 9,i8 7, i8 9,i8 7, i8 9,i8 12, i8 8, i8 9, i8 10>)
-  store <16 x i8> %0, <16 x i8> * @gv16i8
+  store <16 x i8> %0, ptr @gv16i8
   ret void
 }
 
@@ -4193,7 +4193,7 @@ define void @calli16_2() {
 ; MIPS64R5EL-NEXT:    nop
 entry:
   %0 = call <2 x i16> @i16_2(<2 x i16> <i16 6, i16 7>, <2 x i16> <i16 12, i16 8>)
-  store <2 x i16> %0, <2 x i16> * @gv2i16
+  store <2 x i16> %0, ptr @gv2i16
   ret void
 }
 
@@ -4431,7 +4431,7 @@ define void @calli16_4() {
 ; MIPS64R5EL-NEXT:    nop
 entry:
   %0 = call <4 x i16> @i16_4(<4 x i16> <i16 6, i16 7, i16 9, i16 10>, <4 x i16> <i16 12, i16 8, i16 9, i16 10>)
-  store <4 x i16> %0, <4 x i16> * @gv4i16
+  store <4 x i16> %0, ptr @gv4i16
   ret void
 }
 
@@ -4731,7 +4731,7 @@ define void @calli16_8() {
 ; MIPS64R5EL-NEXT:    nop
 entry:
   %0 = call <8 x i16> @i16_8(<8 x i16> <i16 6, i16 7, i16 9, i16 10, i16 6, i16 7, i16 9, i16 10>, <8 x i16> <i16 6, i16 7, i16 9, i16 10, i16 12, i16 8, i16 9, i16 10>)
-  store <8 x i16> %0, <8 x i16> * @gv8i16
+  store <8 x i16> %0, ptr @gv8i16
   ret void
 }
 
@@ -4889,7 +4889,7 @@ define void @calli32_2() {
 ; MIPS64R5EL-NEXT:    nop
 entry:
   %0 = call <2 x i32> @i32_2(<2 x i32> <i32 6, i32 7>, <2 x i32> <i32 12, i32 8>)
-  store <2 x i32> %0, <2 x i32> * @gv2i32
+  store <2 x i32> %0, ptr @gv2i32
   ret void
 }
 
@@ -5057,7 +5057,7 @@ define void @calli32_4() {
 ; MIPS64EL-NEXT:    nop
 entry:
   %0 = call <4 x i32> @i32_4(<4 x i32> <i32 6, i32 7, i32 9, i32 10>, <4 x i32> <i32 12, i32 8, i32 9, i32 10>)
-  store <4 x i32> %0, <4 x i32> * @gv4i32
+  store <4 x i32> %0, ptr @gv4i32
   ret void
 }
 
@@ -5214,7 +5214,7 @@ define void @calli64_2() {
 ; MIPS32EL-NEXT:    nop
 entry:
   %0 = call <2 x i64> @i64_2(<2 x i64> <i64 6, i64 7>, <2 x i64> <i64 12, i64 8>)
-  store <2 x i64> %0, <2 x i64> * @gv2i64
+  store <2 x i64> %0, ptr @gv2i64
   ret void
 }
 
@@ -5362,7 +5362,7 @@ define void @callfloat_2() {
 ; MIPS64EL-NEXT:    nop
 entry:
   %0 = call <2 x float> @float2_extern(<2 x float> <float 0.0, float -1.0>, <2 x float> <float 12.0, float 14.0>)
-  store <2 x float> %0, <2 x float> * @gv2f32
+  store <2 x float> %0, ptr @gv2f32
   ret void
 }
 
@@ -5568,7 +5568,7 @@ define void @callfloat_4() {
 ; MIPS64EL-NEXT:    nop
 entry:
   %0 = call <4 x float> @float4_extern(<4 x float> <float 0.0, float -1.0, float 2.0, float 4.0>, <4 x float> <float 12.0, float 14.0, float 15.0, float 16.0>)
-  store <4 x float> %0, <4 x float> * @gv4f32
+  store <4 x float> %0, ptr @gv4f32
   ret void
 }
 
@@ -5762,7 +5762,7 @@ define void @calldouble_2() {
 ; MIPS32EL-NEXT:    nop
 entry:
   %0 = call <2 x double> @double2_extern(<2 x double> <double 0.0, double -1.0>, <2 x double> <double 12.0, double 14.0>)
-  store <2 x double> %0, <2 x double> * @gv2f64
+  store <2 x double> %0, ptr @gv2f64
   ret void
 }
 
@@ -7005,6 +7005,6 @@ define void @call_i24x2() {
 ; MIPS64R5EL-NEXT:    nop
 entry:
   %0 = call <2 x i24> @i24x2(<2 x i24> <i24 6, i24 7>, <2 x i24> <i24 12, i24 8>)
-  store <2 x i24> %0, <2 x i24> * @gv2i24
+  store <2 x i24> %0, ptr @gv2i24
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/cfi_offset.ll b/llvm/test/CodeGen/Mips/cfi_offset.ll
index e7908c0823c81..217adda59468a 100644
--- a/llvm/test/CodeGen/Mips/cfi_offset.ll
+++ b/llvm/test/CodeGen/Mips/cfi_offset.ll
@@ -32,10 +32,10 @@ define void @bar() {
 ; CHECK:  .cfi_offset 31, -20
 ; CHECK:  .cfi_offset 16, -24
 
-    %val1 = load volatile double, double* @var
-    %val2 = load volatile double, double* @var
+    %val1 = load volatile double, ptr @var
+    %val2 = load volatile double, ptr @var
     call void (...) @foo() nounwind
-    store volatile double %val1, double* @var
-    store volatile double %val2, double* @var
+    store volatile double %val1, ptr @var
+    store volatile double %val2, ptr @var
     ret void
 }

diff --git a/llvm/test/CodeGen/Mips/ci2.ll b/llvm/test/CodeGen/Mips/ci2.ll
index 5e6b7814d5e84..9546656ebf252 100644
--- a/llvm/test/CodeGen/Mips/ci2.ll
+++ b/llvm/test/CodeGen/Mips/ci2.ll
@@ -7,22 +7,22 @@
 ; Function Attrs: nounwind
 define void @foo() #0 {
 entry:
-  store i32 305419896, i32* @i, align 4
-  %0 = load i32, i32* @b, align 4
+  store i32 305419896, ptr @i, align 4
+  %0 = load i32, ptr @b, align 4
   %tobool = icmp ne i32 %0, 0
   br i1 %tobool, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 10, i32* @b, align 4
+  store i32 10, ptr @b, align 4
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  store i32 20, i32* @b, align 4
+  store i32 20, ptr @b, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
   call void asm sideeffect ".space 100000", ""() #1, !srcloc !1
-  store i32 305419896, i32* @l, align 4
+  store i32 305419896, ptr @l, align 4
   ret void
 ; constisle: $CPI0_1:
 ; constisle	.4byte	305419896               # 0x12345678

diff --git a/llvm/test/CodeGen/Mips/cmov.ll b/llvm/test/CodeGen/Mips/cmov.ll
index 89b557c4eb069..d19c1b1f14c36 100644
--- a/llvm/test/CodeGen/Mips/cmov.ll
+++ b/llvm/test/CodeGen/Mips/cmov.ll
@@ -7,7 +7,7 @@
 ; RUN: llc -march=mips64el -mcpu=mips64r6               -relocation-model=pic < %s | FileCheck %s -check-prefixes=ALL,64-CMP
 
 @i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
-@i3 = common global i32* null, align 4
+@i3 = common global ptr null, align 4
 
 ; ALL-LABEL: cmov1:
 
@@ -38,12 +38,12 @@
 ; 64-CMP-DAG:   or $[[T2:[0-9]+]], $[[T0]], $[[T1]]
 ; 64-CMP-DAG:   ld $2, 0($[[T2]])
 
-define i32* @cmov1(i32 signext %s) nounwind readonly {
+define ptr @cmov1(i32 signext %s) nounwind readonly {
 entry:
   %tobool = icmp ne i32 %s, 0
-  %tmp1 = load i32*, i32** @i3, align 4
-  %cond = select i1 %tobool, i32* getelementptr inbounds ([3 x i32], [3 x i32]* @i1, i32 0, i32 0), i32* %tmp1
-  ret i32* %cond
+  %tmp1 = load ptr, ptr @i3, align 4
+  %cond = select i1 %tobool, ptr @i1, ptr %tmp1
+  ret ptr %cond
 }
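
The select in cmov1 also loses its getelementptr constant expression: a
zero-index array decay is an identity under opaque pointers, so ptr @i1
already names the first element. Sketch with a hypothetical global @arr:

  @arr = global [3 x i32] zeroinitializer
  define ptr @first_elem() {
    ; illustrative sketch; formerly required
    ;   getelementptr inbounds ([3 x i32], [3 x i32]* @arr, i32 0, i32 0)
    ret ptr @arr
  }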
 
 @c = global i32 1, align 4
@@ -81,8 +81,8 @@ entry:
 define i32 @cmov2(i32 signext %s) nounwind readonly {
 entry:
   %tobool = icmp ne i32 %s, 0
-  %tmp1 = load i32, i32* @c, align 4
-  %tmp2 = load i32, i32* @d, align 4
+  %tmp1 = load i32, ptr @c, align 4
+  %tmp2 = load i32, ptr @d, align 4
   %cond = select i1 %tobool, i32 %tmp1, i32 %tmp2
   ret i32 %cond
 }

diff --git a/llvm/test/CodeGen/Mips/cmplarge.ll b/llvm/test/CodeGen/Mips/cmplarge.ll
index aa88323368c7d..a64983dfed023 100644
--- a/llvm/test/CodeGen/Mips/cmplarge.ll
+++ b/llvm/test/CodeGen/Mips/cmplarge.ll
@@ -7,10 +7,10 @@ target triple = "mipsel--linux-gnu"
 
 
 
-define void @getSubImagesLuma(%struct.StorablePicture* nocapture %s) #0 {
+define void @getSubImagesLuma(ptr nocapture %s) #0 {
 entry:
-  %size_y = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %s, i32 0, i32 1
-  %0 = load i32, i32* %size_y, align 4
+  %size_y = getelementptr inbounds %struct.StorablePicture, ptr %s, i32 0, i32 1
+  %0 = load i32, ptr %size_y, align 4
   %sub = add nsw i32 %0, -1
   %add5 = add nsw i32 %0, 20
   %cmp6 = icmp sgt i32 %add5, -20
@@ -18,9 +18,9 @@ entry:
 
 for.body:                                         ; preds = %entry, %for.body
   %j.07 = phi i32 [ %inc, %for.body ], [ -20, %entry ]
-  %call = tail call i32 bitcast (i32 (...)* @iClip3 to i32 (i32, i32, i32)*)(i32 0, i32 %sub, i32 %j.07) #2
+  %call = tail call i32 @iClip3(i32 0, i32 %sub, i32 %j.07) #2
   %inc = add nsw i32 %j.07, 1
-  %1 = load i32, i32* %size_y, align 4
+  %1 = load i32, ptr %size_y, align 4
   %add = add nsw i32 %1, 20
   %cmp = icmp slt i32 %inc, %add
   br i1 %cmp, label %for.body, label %for.end
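
The iClip3 call drops its constant-expression bitcast of the callee: the
function was declared K&R-style as i32 (...), and with both function pointer
types now plain ptr the cast is a no-op, leaving a direct call. Reduced
sketch with a hypothetical external @clip:

  declare i32 @clip(...)
  define i32 @caller(i32 %x) {
    ; illustrative sketch: the call site no longer needs a bitcast
    %r = call i32 @clip(i32 %x)
    ret i32 %r
  }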

diff --git a/llvm/test/CodeGen/Mips/compactbranches/beqc-bnec-register-constraint.ll b/llvm/test/CodeGen/Mips/compactbranches/beqc-bnec-register-constraint.ll
index 54e51296636cb..4c8674d46e291 100644
--- a/llvm/test/CodeGen/Mips/compactbranches/beqc-bnec-register-constraint.ll
+++ b/llvm/test/CodeGen/Mips/compactbranches/beqc-bnec-register-constraint.ll
@@ -13,20 +13,20 @@
 ; Starting from dwarf exception handling preparation skips optimizations that
 ; may simplify out the crucial bnec $4, $4 instruction.
 
-define internal void @_ZL14TestRemoveLastv(i32* %alist.sroa.0.4) {
+define internal void @_ZL14TestRemoveLastv(ptr %alist.sroa.0.4) {
 ; CHECK-LABEL: _ZL14TestRemoveLastv:
 entry:
-  %ascevgep = getelementptr i32, i32* %alist.sroa.0.4, i64 99
+  %ascevgep = getelementptr i32, ptr %alist.sroa.0.4, i64 99
   br label %do.body121
 
 for.cond117:
   %alsr.iv.next = add nsw i32 %alsr.iv, -1
-  %ascevgep340 = getelementptr i32, i32* %alsr.iv339, i64 -1
+  %ascevgep340 = getelementptr i32, ptr %alsr.iv339, i64 -1
   %acmp118 = icmp sgt i32 %alsr.iv.next, 0
   br i1 %acmp118, label %do.body121, label %if.then143
 
 do.body121:
-  %alsr.iv339 = phi i32* [ %ascevgep, %entry ], [ %ascevgep340, %for.cond117 ]
+  %alsr.iv339 = phi ptr [ %ascevgep, %entry ], [ %ascevgep340, %for.cond117 ]
   %alsr.iv = phi i32 [ 100, %entry ], [ %alsr.iv.next, %for.cond117 ]
   %a9 = add i32 %alsr.iv, -1
   %alnot124 = icmp eq i32 %alsr.iv, %alsr.iv
@@ -34,7 +34,7 @@ do.body121:
 
 do.body134:
   %a10 = add i32 %alsr.iv, -1
-  %a11 = load i32, i32* %alsr.iv339, align 4, !tbaa !5
+  %a11 = load i32, ptr %alsr.iv339, align 4, !tbaa !5
 ; CHECK-NOT: bnec $[[R0:[0-9]+]], $[[R0]]
 ; CHECK-NOT: beqc $[[R1:[0-9]+]], $[[R1]]
   %alnot137 = icmp eq i32 %a9, %a11
@@ -50,20 +50,20 @@ do.end146:
 
 }
 
-define internal void @_ZL14TestRemoveLastv64(i64* %alist.sroa.0.4) {
+define internal void @_ZL14TestRemoveLastv64(ptr %alist.sroa.0.4) {
 ; CHECK-LABEL: _ZL14TestRemoveLastv64:
 entry:
-  %ascevgep = getelementptr i64, i64* %alist.sroa.0.4, i64 99
+  %ascevgep = getelementptr i64, ptr %alist.sroa.0.4, i64 99
   br label %do.body121
 
 for.cond117:
   %alsr.iv.next = add nsw i64 %alsr.iv, -1
-  %ascevgep340 = getelementptr i64, i64* %alsr.iv339, i64 -1
+  %ascevgep340 = getelementptr i64, ptr %alsr.iv339, i64 -1
   %acmp118 = icmp sgt i64 %alsr.iv.next, 0
   br i1 %acmp118, label %do.body121, label %if.then143
 
 do.body121:
-  %alsr.iv339 = phi i64* [ %ascevgep, %entry ], [ %ascevgep340, %for.cond117 ]
+  %alsr.iv339 = phi ptr [ %ascevgep, %entry ], [ %ascevgep340, %for.cond117 ]
   %alsr.iv = phi i64 [ 100, %entry ], [ %alsr.iv.next, %for.cond117 ]
   %a9 = add i64 %alsr.iv, -1
   %alnot124 = icmp eq i64 %alsr.iv, %alsr.iv
@@ -71,7 +71,7 @@ do.body121:
 
 do.body134:
   %a10 = add i64 %alsr.iv, -1
-  %a11 = load i64, i64* %alsr.iv339, align 4, !tbaa !5
+  %a11 = load i64, ptr %alsr.iv339, align 4, !tbaa !5
 ; CHECK-NOT: bnec $[[R0:[0-9]+]], $[[R0]]
 ; CHECK-NOT: beqc $[[R1:[0-9]+]], $[[R1]]
   %alnot137 = icmp eq i64 %a9, %a11

diff --git a/llvm/test/CodeGen/Mips/compactbranches/compact-branches-64.ll b/llvm/test/CodeGen/Mips/compactbranches/compact-branches-64.ll
index 1290acd29d96f..1ba80bff8d5af 100644
--- a/llvm/test/CodeGen/Mips/compactbranches/compact-branches-64.ll
+++ b/llvm/test/CodeGen/Mips/compactbranches/compact-branches-64.ll
@@ -172,11 +172,11 @@ if.end:                                           ; preds = %entry, %if.then
   ret void
 }
 
-define i64 @l9(i8* ()* %i) {
+define i64 @l9(ptr %i) {
 entry:
 ; CHECK-LABEL: l9:
-  %i.addr = alloca i8* ()*, align 4
-  store i8* ()* %i, i8* ()** %i.addr, align 4
+  %i.addr = alloca ptr, align 4
+  store ptr %i, ptr %i.addr, align 4
 ; CHECK: jalrc $25
   %call = call i64 @k()
   %cmp = icmp ne i64 %call, 0
@@ -184,9 +184,9 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %0 = load i8* ()*, i8* ()** %i.addr, align 4
+  %0 = load ptr, ptr %i.addr, align 4
 ; CHECK: jalrc $25
-  %call1 = call i8* %0()
+  %call1 = call ptr %0()
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
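
l9 exercises function pointers themselves: the alloca, store, and load no
longer spell out i8* ()*, since every function pointer is just ptr and only
the call site carries a signature. Reduced sketch with hypothetical names:

  define ptr @invoke_it(ptr %f) {
    ; illustrative sketch: %f is an opaque function pointer;
    ; the call instruction supplies the signature
    %r = call ptr %f()
    ret ptr %r
  }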

diff --git a/llvm/test/CodeGen/Mips/compactbranches/compact-branches.ll b/llvm/test/CodeGen/Mips/compactbranches/compact-branches.ll
index 6d2f33e327218..e8ec97e421824 100644
--- a/llvm/test/CodeGen/Mips/compactbranches/compact-branches.ll
+++ b/llvm/test/CodeGen/Mips/compactbranches/compact-branches.ll
@@ -183,10 +183,10 @@ if.end:                                           ; preds = %entry, %if.then
   ret void
 }
 
-define i32 @l9(i8* ()* %i) #0 {
+define i32 @l9(ptr %i) #0 {
 entry:
-  %i.addr = alloca i8* ()*, align 4
-  store i8* ()* %i, i8* ()** %i.addr, align 4
+  %i.addr = alloca ptr, align 4
+  store ptr %i, ptr %i.addr, align 4
 ; STATIC32: jal
 ; STATIC32: nop
 ; PIC: jalrc $25
@@ -197,9 +197,9 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
-  %0 = load i8* ()*, i8* ()** %i.addr, align 4
+  %0 = load ptr, ptr %i.addr, align 4
 ; CHECK: jalrc $25
-  %call1 = call i8* %0()
+  %call1 = call ptr %0()
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry

diff --git a/llvm/test/CodeGen/Mips/compactbranches/unsafe-in-forbidden-slot.ll b/llvm/test/CodeGen/Mips/compactbranches/unsafe-in-forbidden-slot.ll
index d8046ea0e258a..cbd8b2370ac96 100644
--- a/llvm/test/CodeGen/Mips/compactbranches/unsafe-in-forbidden-slot.ll
+++ b/llvm/test/CodeGen/Mips/compactbranches/unsafe-in-forbidden-slot.ll
@@ -6,7 +6,7 @@
 ; Function Attrs: nounwind
 define void @_Z3foov() #0 {
 entry:
-  %0 = load volatile i32, i32* @boo, align 4
+  %0 = load volatile i32, ptr @boo, align 4
   switch i32 %0, label %sw.epilog [
     i32 0, label %sw.bb
     i32 1, label %sw.bb1
@@ -14,7 +14,7 @@ entry:
   ]
 
 sw.bb:                                            ; preds = %entry
-  store volatile i32 1, i32* @boo, align 4
+  store volatile i32 1, ptr @boo, align 4
   br label %sw.epilog
 ; CHECK: beqzc
 ; CHECK-NEXT: nop
@@ -22,7 +22,7 @@ sw.bb:                                            ; preds = %entry
 ; CHECK-NEXT: j
 
 sw.bb1:                                           ; preds = %entry, %entry
-  store volatile i32 2, i32* @boo, align 4
+  store volatile i32 2, ptr @boo, align 4
   br label %sw.epilog
 ; CHECK: bnezc
 ; CHECK-NEXT: nop

diff --git a/llvm/test/CodeGen/Mips/const1.ll b/llvm/test/CodeGen/Mips/const1.ll
index 8c6fe8fa7fa4b..429107cfdbcda 100644
--- a/llvm/test/CodeGen/Mips/const1.ll
+++ b/llvm/test/CodeGen/Mips/const1.ll
@@ -12,10 +12,10 @@ target triple = "mipsel-unknown-linux"
 ; Function Attrs: nounwind
 define void @t() #0 {
 entry:
-  store i32 -559023410, i32* @i, align 4
-  store i32 -559023410, i32* @j, align 4
-  store i32 -87105875, i32* @k, align 4
-  store i32 262991277, i32* @l, align 4
+  store i32 -559023410, ptr @i, align 4
+  store i32 -559023410, ptr @j, align 4
+  store i32 -87105875, ptr @k, align 4
+  store i32 262991277, ptr @l, align 4
   ret void
 ; CHECK: 	lw	${{[0-9]+}}, $CPI0_0
 ; CHECK:	lw	${{[0-9]+}}, $CPI0_1

diff --git a/llvm/test/CodeGen/Mips/const4a.ll b/llvm/test/CodeGen/Mips/const4a.ll
index 79cea7c18deab..757629de5da35 100644
--- a/llvm/test/CodeGen/Mips/const4a.ll
+++ b/llvm/test/CodeGen/Mips/const4a.ll
@@ -13,8 +13,8 @@ target triple = "mips--linux-gnu"
 ; Function Attrs: nounwind
 define void @t() #0 {
 entry:
-  store i32 -559023410, i32* @i, align 4
-  %0 = load i32, i32* @b, align 4
+  store i32 -559023410, ptr @i, align 4
+  %0 = load i32, ptr @b, align 4
 ; no-load-relax:	lw	${{[0-9]+}}, $CPI0_1	# 16 bit inst
   %tobool = icmp ne i32 %0, 0
   br i1 %tobool, label %if.then, label %if.else
@@ -27,142 +27,142 @@ entry:
 ; no-load-relax: $BB0_3:
 ; no-load-relax:	lw	${{[0-9]+}}, %call16(goo)(${{[0-9]+}})
 if.then:                                          ; preds = %entry
-  call void bitcast (void (...)* @foo to void ()*)()
+  call void @foo()
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  call void bitcast (void (...)* @goo to void ()*)()
+  call void @goo()
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/const6.ll b/llvm/test/CodeGen/Mips/const6.ll
index e8c3f8c41a081..27e8eace4b163 100644
--- a/llvm/test/CodeGen/Mips/const6.ll
+++ b/llvm/test/CodeGen/Mips/const6.ll
@@ -15,7 +15,7 @@ target triple = "mips--linux-gnu"
 ; Function Attrs: nounwind
 define void @t() #0 {
 entry:
-  store i32 -559023410, i32* @i, align 4
+  store i32 -559023410, ptr @i, align 4
 ; load-relax: 	lw	${{[0-9]+}}, $CPI0_0
 ; load-relax:	jrc	 $ra
 ; load-relax:	.p2align	2
@@ -31,124 +31,124 @@ entry:
 ; no-load-relax:	.4byte	3735943886
 ; no-load-relax: $BB0_2:
 
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
-  call void bitcast (void (...)* @hoo to void ()*)()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
+  call void @hoo()
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/const6a.ll b/llvm/test/CodeGen/Mips/const6a.ll
index ed0f52fb9d8b9..5a3f4a1f951ef 100644
--- a/llvm/test/CodeGen/Mips/const6a.ll
+++ b/llvm/test/CodeGen/Mips/const6a.ll
@@ -11,7 +11,7 @@ target triple = "mips--linux-gnu"
 ; Function Attrs: nounwind
 define void @t() #0 {
 entry:
-  store i32 -559023410, i32* @i, align 4
+  store i32 -559023410, ptr @i, align 4
 ; load-relax-NOT: 	lw	${{[0-9]+}}, $CPI0_0 # 16 bit inst
 ; load-relax1: lw	${{[0-9]+}}, $CPI0_0
 ; load-relax:	jrc	 $ra

diff --git a/llvm/test/CodeGen/Mips/constraint-c-err.ll b/llvm/test/CodeGen/Mips/constraint-c-err.ll
index 4015ef4806539..94768f89e9924 100644
--- a/llvm/test/CodeGen/Mips/constraint-c-err.ll
+++ b/llvm/test/CodeGen/Mips/constraint-c-err.ll
@@ -4,8 +4,8 @@
 define i32 @main() #0 {
 entry:
   %jmp = alloca float, align 4
-  store float 0x4200000000000000, float* %jmp, align 4
-  %0 = load float, float* %jmp, align 4
+  store float 0x4200000000000000, ptr %jmp, align 4
+  %0 = load float, ptr %jmp, align 4
   call void asm sideeffect "jr $0", "c,~{$1}"(float %0) #1
 
 ; CHECK: error: couldn't allocate input reg for constraint 'c'

diff --git a/llvm/test/CodeGen/Mips/constraint-c.ll b/llvm/test/CodeGen/Mips/constraint-c.ll
index 5a5d7672e9564..31b75b01a5e85 100644
--- a/llvm/test/CodeGen/Mips/constraint-c.ll
+++ b/llvm/test/CodeGen/Mips/constraint-c.ll
@@ -4,8 +4,8 @@
 define i32 @main() #0 {
 entry:
   %jmp = alloca i32, align 4
-  store i32 0, i32* %jmp, align 4
-  %0 = load i32, i32* %jmp, align 4
+  store i32 0, ptr %jmp, align 4
+  %0 = load i32, ptr %jmp, align 4
   call void asm sideeffect "jr $0", "c,~{$1}"(i32 %0) #1
 
 ; CHECK: addiu   $25, $zero, 0

diff --git a/llvm/test/CodeGen/Mips/constraint-empty.ll b/llvm/test/CodeGen/Mips/constraint-empty.ll
index 849320f61a15b..8541a4fa348e6 100644
--- a/llvm/test/CodeGen/Mips/constraint-empty.ll
+++ b/llvm/test/CodeGen/Mips/constraint-empty.ll
@@ -5,7 +5,7 @@ define void @foo() {
 entry:
   %s = alloca i32, align 4
   %x = alloca i32, align 4
-  call void asm "", "=*imr,=*m,0,*m,~{$1}"(i32* elementtype(i32) %x, i32* elementtype(i32) %s, i32* %x, i32* elementtype(i32) %s)
+  call void asm "", "=*imr,=*m,0,*m,~{$1}"(ptr elementtype(i32) %x, ptr elementtype(i32) %s, ptr %x, ptr elementtype(i32) %s)
 
 ; CHECK: #APP
 ; CHECK: #NO_APP
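
Indirect inline-asm operands are one of the places where an element type is
still semantically required, which is why the conversion keeps the existing
elementtype attribute rather than relying on the pointer type. Minimal
sketch:

  define void @asm_mem(ptr %p) {
    ; illustrative sketch: elementtype supplies the memory operand's type
    call void asm sideeffect "", "*m"(ptr elementtype(i32) %p)
    ret void
  }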

diff --git a/llvm/test/CodeGen/Mips/cprestore.ll b/llvm/test/CodeGen/Mips/cprestore.ll
index b4489b6e2211a..bb27c947d998e 100644
--- a/llvm/test/CodeGen/Mips/cprestore.ll
+++ b/llvm/test/CodeGen/Mips/cprestore.ll
@@ -13,8 +13,8 @@
 define void @foo2() nounwind {
 entry:
   %s = alloca %struct.S, align 4
-  call void @foo1(%struct.S* byval(%struct.S) %s)
+  call void @foo1(ptr byval(%struct.S) %s)
   ret void
 }
 
-declare void @foo1(%struct.S* byval(%struct.S))
+declare void @foo1(ptr byval(%struct.S))
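
byval follows the same rule: the aggregate type that used to live in the
pointer type is carried entirely by the attribute, so nothing is lost going
from %struct.S* byval(%struct.S) to ptr byval(%struct.S). Sketch with a
hypothetical %pair type:

  %pair = type { i32, i32 }
  declare void @take(ptr byval(%pair))
  define void @pass(ptr %p) {
    ; illustrative sketch: byval names the copied aggregate explicitly
    call void @take(ptr byval(%pair) %p)
    ret void
  }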

diff --git a/llvm/test/CodeGen/Mips/cstmaterialization/stack.ll b/llvm/test/CodeGen/Mips/cstmaterialization/stack.ll
index 41b5bf638107a..78c01d9b7473e 100644
--- a/llvm/test/CodeGen/Mips/cstmaterialization/stack.ll
+++ b/llvm/test/CodeGen/Mips/cstmaterialization/stack.ll
@@ -11,8 +11,7 @@
 define i32 @main() {
 entry:
   %z = alloca [1048576 x i8], align 1
-  %arraydecay = getelementptr inbounds [1048576 x i8], [1048576 x i8]* %z, i32 0, i32 0
-  %call = call i32 @foo(i8* %arraydecay)
+  %call = call i32 @foo(ptr %z)
   ret i32 0
 ; CHECK-LABEL: main
 
@@ -51,4 +50,4 @@ entry:
 
 }
 
-declare i32 @foo(i8*)
+declare i32 @foo(ptr)
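
Here the conversion removes an instruction outright: the array-decay
getelementptr yields a pointer identical to the alloca itself, so the call
can take %z directly. Sketch with hypothetical names:

  declare i32 @use(ptr)
  define i32 @decay() {
    %buf = alloca [16 x i8], align 1
    ; illustrative sketch: the alloca already is the address of element 0
    %r = call i32 @use(ptr %buf)
    ret i32 %r
  }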

diff --git a/llvm/test/CodeGen/Mips/ctlz.ll b/llvm/test/CodeGen/Mips/ctlz.ll
index c502b0d4cb896..efb23f55c2cf2 100644
--- a/llvm/test/CodeGen/Mips/ctlz.ll
+++ b/llvm/test/CodeGen/Mips/ctlz.ll
@@ -8,10 +8,10 @@
 define i32 @main() #0 {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = load i32, i32* @x, align 4
+  store i32 0, ptr %retval
+  %0 = load i32, ptr @x, align 4
   %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 true)
-  store i32 %1, i32* @y, align 4
+  store i32 %1, ptr @y, align 4
   ret i32 0
 }
 

diff --git a/llvm/test/CodeGen/Mips/dagcombine-store-gep-chain-slow.ll b/llvm/test/CodeGen/Mips/dagcombine-store-gep-chain-slow.ll
index c4ceff83cc183..8c2ac7a7f2ce3 100644
--- a/llvm/test/CodeGen/Mips/dagcombine-store-gep-chain-slow.ll
+++ b/llvm/test/CodeGen/Mips/dagcombine-store-gep-chain-slow.ll
@@ -4,7 +4,7 @@
 ; optimization level check in findBetterNeighbors, this test demonstrates
 ; a severe compile time regression (~30 minutes) vs. <10 seconds at 'optnone'.
 
-declare i8 @k(i8*)
+declare i8 @k(ptr)
 
 define void @d(i32 signext %e4) #1 {
 entry:
@@ -13,601 +13,598 @@ entry:
   %new_val = alloca i8, align 1
   %simd = alloca i8, align 1
   %code = alloca [269 x i8], align 1
-  store i32 %e4, i32* %e4.addr, align 4
-  %call = call zeroext i8 @k(i8* %simd)
-  store i8 %call, i8* %simd, align 1
+  store i32 %e4, ptr %e4.addr, align 4
+  %call = call zeroext i8 @k(ptr %simd)
+  store i8 %call, ptr %simd, align 1
 
-  %arrayinit.begin = getelementptr inbounds [269 x i8], [269 x i8]* %code, i32 0, i32 0
-  store i8 32, i8* %arrayinit.begin, align 1
-  %arrayinit.element = getelementptr inbounds i8, i8* %arrayinit.begin, i32 1
-  %a2 = load i8, i8* %old_val, align 1
-  store i8 %a2, i8* %arrayinit.element, align 1
-  %arrayinit.element1 = getelementptr inbounds i8, i8* %arrayinit.element, i32 1
-  store i8 -3, i8* %arrayinit.element1, align 1
-  %arrayinit.element2 = getelementptr inbounds i8, i8* %arrayinit.element1, i32 1
-  store i8 0, i8* %arrayinit.element2, align 1
-  %arrayinit.element3 = getelementptr inbounds i8, i8* %arrayinit.element2, i32 1
-  store i8 33, i8* %arrayinit.element3, align 1
-  %arrayinit.element4 = getelementptr inbounds i8, i8* %arrayinit.element3, i32 1
-  %a3 = load i8, i8* %simd, align 1
-  store i8 %a3, i8* %arrayinit.element4, align 1
-  %arrayinit.element5 = getelementptr inbounds i8, i8* %arrayinit.element4, i32 1
-  store i8 32, i8* %arrayinit.element5, align 1
-  %arrayinit.element6 = getelementptr inbounds i8, i8* %arrayinit.element5, i32 1
-  %a4 = load i8, i8* %simd, align 1
-  store i8 %a4, i8* %arrayinit.element6, align 1
-  %arrayinit.element7 = getelementptr inbounds i8, i8* %arrayinit.element6, i32 1
-  store i8 32, i8* %arrayinit.element7, align 1
-  %arrayinit.element8 = getelementptr inbounds i8, i8* %arrayinit.element7, i32 1
-  %a5 = load i8, i8* %new_val, align 1
-  store i8 %a5, i8* %arrayinit.element8, align 1
-  %arrayinit.element9 = getelementptr inbounds i8, i8* %arrayinit.element8, i32 1
-  store i8 -3, i8* %arrayinit.element9, align 1
-  %arrayinit.element10 = getelementptr inbounds i8, i8* %arrayinit.element9, i32 1
-  store i8 2, i8* %arrayinit.element10, align 1
-  %arrayinit.element11 = getelementptr inbounds i8, i8* %arrayinit.element10, i32 1
-  store i8 0, i8* %arrayinit.element11, align 1
-  %arrayinit.element12 = getelementptr inbounds i8, i8* %arrayinit.element11, i32 1
-  store i8 33, i8* %arrayinit.element12, align 1
-  %arrayinit.element13 = getelementptr inbounds i8, i8* %arrayinit.element12, i32 1
-  %a6 = load i8, i8* %simd, align 1
-  store i8 %a6, i8* %arrayinit.element13, align 1
-  %arrayinit.element14 = getelementptr inbounds i8, i8* %arrayinit.element13, i32 1
-  store i8 32, i8* %arrayinit.element14, align 1
-  %arrayinit.element15 = getelementptr inbounds i8, i8* %arrayinit.element14, i32 1
-  %a7 = load i8, i8* %new_val, align 1
-  store i8 %a7, i8* %arrayinit.element15, align 1
-  %arrayinit.element16 = getelementptr inbounds i8, i8* %arrayinit.element15, i32 1
-  store i8 32, i8* %arrayinit.element16, align 1
-  %arrayinit.element17 = getelementptr inbounds i8, i8* %arrayinit.element16, i32 1
-  %a8 = load i8, i8* %simd, align 1
-  store i8 %a8, i8* %arrayinit.element17, align 1
-  %arrayinit.element18 = getelementptr inbounds i8, i8* %arrayinit.element17, i32 1
-  store i8 -3, i8* %arrayinit.element18, align 1
-  %arrayinit.element19 = getelementptr inbounds i8, i8* %arrayinit.element18, i32 1
-  store i8 1, i8* %arrayinit.element19, align 1
-  %arrayinit.element20 = getelementptr inbounds i8, i8* %arrayinit.element19, i32 1
-  store i8 0, i8* %arrayinit.element20, align 1
-  %arrayinit.element21 = getelementptr inbounds i8, i8* %arrayinit.element20, i32 1
-  store i8 92, i8* %arrayinit.element21, align 1
-  %arrayinit.element22 = getelementptr inbounds i8, i8* %arrayinit.element21, i32 1
-  store i8 4, i8* %arrayinit.element22, align 1
-  %arrayinit.element23 = getelementptr inbounds i8, i8* %arrayinit.element22, i32 1
-  store i8 64, i8* %arrayinit.element23, align 1
-  %arrayinit.element24 = getelementptr inbounds i8, i8* %arrayinit.element23, i32 1
-  store i8 65, i8* %arrayinit.element24, align 1
-  %arrayinit.element25 = getelementptr inbounds i8, i8* %arrayinit.element24, i32 1
-  store i8 0, i8* %arrayinit.element25, align 1
-  %arrayinit.element26 = getelementptr inbounds i8, i8* %arrayinit.element25, i32 1
-  store i8 15, i8* %arrayinit.element26, align 1
-  %arrayinit.element27 = getelementptr inbounds i8, i8* %arrayinit.element26, i32 1
-  store i8 11, i8* %arrayinit.element27, align 1
-  %arrayinit.element28 = getelementptr inbounds i8, i8* %arrayinit.element27, i32 1
-  store i8 32, i8* %arrayinit.element28, align 1
-  %arrayinit.element29 = getelementptr inbounds i8, i8* %arrayinit.element28, i32 1
-  %a9 = load i8, i8* %old_val, align 1
-  store i8 %a9, i8* %arrayinit.element29, align 1
-  %arrayinit.element30 = getelementptr inbounds i8, i8* %arrayinit.element29, i32 1
-  store i8 32, i8* %arrayinit.element30, align 1
-  %arrayinit.element31 = getelementptr inbounds i8, i8* %arrayinit.element30, i32 1
-  %a10 = load i8, i8* %simd, align 1
-  store i8 %a10, i8* %arrayinit.element31, align 1
-  %arrayinit.element32 = getelementptr inbounds i8, i8* %arrayinit.element31, i32 1
-  store i8 -3, i8* %arrayinit.element32, align 1
-  %arrayinit.element33 = getelementptr inbounds i8, i8* %arrayinit.element32, i32 1
-  store i8 1, i8* %arrayinit.element33, align 1
-  %arrayinit.element34 = getelementptr inbounds i8, i8* %arrayinit.element33, i32 1
-  store i8 1, i8* %arrayinit.element34, align 1
-  %arrayinit.element35 = getelementptr inbounds i8, i8* %arrayinit.element34, i32 1
-  store i8 92, i8* %arrayinit.element35, align 1
-  %arrayinit.element36 = getelementptr inbounds i8, i8* %arrayinit.element35, i32 1
-  store i8 4, i8* %arrayinit.element36, align 1
-  %arrayinit.element37 = getelementptr inbounds i8, i8* %arrayinit.element36, i32 1
-  store i8 64, i8* %arrayinit.element37, align 1
-  %arrayinit.element38 = getelementptr inbounds i8, i8* %arrayinit.element37, i32 1
-  store i8 65, i8* %arrayinit.element38, align 1
-  %arrayinit.element39 = getelementptr inbounds i8, i8* %arrayinit.element38, i32 1
-  store i8 0, i8* %arrayinit.element39, align 1
-  %arrayinit.element40 = getelementptr inbounds i8, i8* %arrayinit.element39, i32 1
-  store i8 15, i8* %arrayinit.element40, align 1
-  %arrayinit.element41 = getelementptr inbounds i8, i8* %arrayinit.element40, i32 1
-  store i8 11, i8* %arrayinit.element41, align 1
-  %arrayinit.element42 = getelementptr inbounds i8, i8* %arrayinit.element41, i32 1
-  store i8 32, i8* %arrayinit.element42, align 1
-  %arrayinit.element43 = getelementptr inbounds i8, i8* %arrayinit.element42, i32 1
-  %a11 = load i8, i8* %old_val, align 1
-  store i8 %a11, i8* %arrayinit.element43, align 1
-  %arrayinit.element44 = getelementptr inbounds i8, i8* %arrayinit.element43, i32 1
-  store i8 32, i8* %arrayinit.element44, align 1
-  %arrayinit.element45 = getelementptr inbounds i8, i8* %arrayinit.element44, i32 1
-  %a12 = load i8, i8* %simd, align 1
-  store i8 %a12, i8* %arrayinit.element45, align 1
-  %arrayinit.element46 = getelementptr inbounds i8, i8* %arrayinit.element45, i32 1
-  store i8 -3, i8* %arrayinit.element46, align 1
-  %arrayinit.element47 = getelementptr inbounds i8, i8* %arrayinit.element46, i32 1
-  store i8 1, i8* %arrayinit.element47, align 1
-  %arrayinit.element48 = getelementptr inbounds i8, i8* %arrayinit.element47, i32 1
-  store i8 2, i8* %arrayinit.element48, align 1
-  %arrayinit.element49 = getelementptr inbounds i8, i8* %arrayinit.element48, i32 1
-  store i8 92, i8* %arrayinit.element49, align 1
-  %arrayinit.element50 = getelementptr inbounds i8, i8* %arrayinit.element49, i32 1
-  store i8 4, i8* %arrayinit.element50, align 1
-  %arrayinit.element51 = getelementptr inbounds i8, i8* %arrayinit.element50, i32 1
-  store i8 64, i8* %arrayinit.element51, align 1
-  %arrayinit.element52 = getelementptr inbounds i8, i8* %arrayinit.element51, i32 1
-  store i8 65, i8* %arrayinit.element52, align 1
-  %arrayinit.element53 = getelementptr inbounds i8, i8* %arrayinit.element52, i32 1
-  store i8 0, i8* %arrayinit.element53, align 1
-  %arrayinit.element54 = getelementptr inbounds i8, i8* %arrayinit.element53, i32 1
-  store i8 15, i8* %arrayinit.element54, align 1
-  %arrayinit.element55 = getelementptr inbounds i8, i8* %arrayinit.element54, i32 1
-  store i8 11, i8* %arrayinit.element55, align 1
-  %arrayinit.element56 = getelementptr inbounds i8, i8* %arrayinit.element55, i32 1
-  store i8 32, i8* %arrayinit.element56, align 1
-  %arrayinit.element57 = getelementptr inbounds i8, i8* %arrayinit.element56, i32 1
-  %a13 = load i8, i8* %old_val, align 1
-  store i8 %a13, i8* %arrayinit.element57, align 1
-  %arrayinit.element58 = getelementptr inbounds i8, i8* %arrayinit.element57, i32 1
-  store i8 32, i8* %arrayinit.element58, align 1
-  %arrayinit.element59 = getelementptr inbounds i8, i8* %arrayinit.element58, i32 1
-  %a14 = load i8, i8* %simd, align 1
-  store i8 %a14, i8* %arrayinit.element59, align 1
-  %arrayinit.element60 = getelementptr inbounds i8, i8* %arrayinit.element59, i32 1
-  store i8 -3, i8* %arrayinit.element60, align 1
-  %arrayinit.element61 = getelementptr inbounds i8, i8* %arrayinit.element60, i32 1
-  store i8 1, i8* %arrayinit.element61, align 1
-  %arrayinit.element62 = getelementptr inbounds i8, i8* %arrayinit.element61, i32 1
-  store i8 3, i8* %arrayinit.element62, align 1
-  %arrayinit.element63 = getelementptr inbounds i8, i8* %arrayinit.element62, i32 1
-  store i8 92, i8* %arrayinit.element63, align 1
-  %arrayinit.element64 = getelementptr inbounds i8, i8* %arrayinit.element63, i32 1
-  store i8 4, i8* %arrayinit.element64, align 1
-  %arrayinit.element65 = getelementptr inbounds i8, i8* %arrayinit.element64, i32 1
-  store i8 64, i8* %arrayinit.element65, align 1
-  %arrayinit.element66 = getelementptr inbounds i8, i8* %arrayinit.element65, i32 1
-  store i8 65, i8* %arrayinit.element66, align 1
-  %arrayinit.element67 = getelementptr inbounds i8, i8* %arrayinit.element66, i32 1
-  store i8 0, i8* %arrayinit.element67, align 1
-  %arrayinit.element68 = getelementptr inbounds i8, i8* %arrayinit.element67, i32 1
-  store i8 15, i8* %arrayinit.element68, align 1
-  %arrayinit.element69 = getelementptr inbounds i8, i8* %arrayinit.element68, i32 1
-  store i8 11, i8* %arrayinit.element69, align 1
-  %arrayinit.element70 = getelementptr inbounds i8, i8* %arrayinit.element69, i32 1
-  store i8 32, i8* %arrayinit.element70, align 1
-  %arrayinit.element71 = getelementptr inbounds i8, i8* %arrayinit.element70, i32 1
-  %a15 = load i8, i8* %simd, align 1
-  store i8 %a15, i8* %arrayinit.element71, align 1
-  %arrayinit.element72 = getelementptr inbounds i8, i8* %arrayinit.element71, i32 1
-  store i8 32, i8* %arrayinit.element72, align 1
-  %arrayinit.element73 = getelementptr inbounds i8, i8* %arrayinit.element72, i32 1
-  %a16 = load i8, i8* %new_val, align 1
-  store i8 %a16, i8* %arrayinit.element73, align 1
-  %arrayinit.element74 = getelementptr inbounds i8, i8* %arrayinit.element73, i32 1
-  store i8 -3, i8* %arrayinit.element74, align 1
-  %arrayinit.element75 = getelementptr inbounds i8, i8* %arrayinit.element74, i32 1
-  store i8 2, i8* %arrayinit.element75, align 1
-  %arrayinit.element76 = getelementptr inbounds i8, i8* %arrayinit.element75, i32 1
-  store i8 1, i8* %arrayinit.element76, align 1
-  %arrayinit.element77 = getelementptr inbounds i8, i8* %arrayinit.element76, i32 1
-  store i8 33, i8* %arrayinit.element77, align 1
-  %arrayinit.element78 = getelementptr inbounds i8, i8* %arrayinit.element77, i32 1
-  %a17 = load i8, i8* %simd, align 1
-  store i8 %a17, i8* %arrayinit.element78, align 1
-  %arrayinit.element79 = getelementptr inbounds i8, i8* %arrayinit.element78, i32 1
-  store i8 32, i8* %arrayinit.element79, align 1
-  %arrayinit.element80 = getelementptr inbounds i8, i8* %arrayinit.element79, i32 1
-  %a18 = load i8, i8* %new_val, align 1
-  store i8 %a18, i8* %arrayinit.element80, align 1
-  %arrayinit.element81 = getelementptr inbounds i8, i8* %arrayinit.element80, i32 1
-  store i8 32, i8* %arrayinit.element81, align 1
-  %arrayinit.element82 = getelementptr inbounds i8, i8* %arrayinit.element81, i32 1
-  %a19 = load i8, i8* %simd, align 1
-  store i8 %a19, i8* %arrayinit.element82, align 1
-  %arrayinit.element83 = getelementptr inbounds i8, i8* %arrayinit.element82, i32 1
-  store i8 -3, i8* %arrayinit.element83, align 1
-  %arrayinit.element84 = getelementptr inbounds i8, i8* %arrayinit.element83, i32 1
-  store i8 1, i8* %arrayinit.element84, align 1
-  %arrayinit.element85 = getelementptr inbounds i8, i8* %arrayinit.element84, i32 1
-  store i8 0, i8* %arrayinit.element85, align 1
-  %arrayinit.element86 = getelementptr inbounds i8, i8* %arrayinit.element85, i32 1
-  store i8 92, i8* %arrayinit.element86, align 1
-  %arrayinit.element87 = getelementptr inbounds i8, i8* %arrayinit.element86, i32 1
-  store i8 4, i8* %arrayinit.element87, align 1
-  %arrayinit.element88 = getelementptr inbounds i8, i8* %arrayinit.element87, i32 1
-  store i8 64, i8* %arrayinit.element88, align 1
-  %arrayinit.element89 = getelementptr inbounds i8, i8* %arrayinit.element88, i32 1
-  store i8 65, i8* %arrayinit.element89, align 1
-  %arrayinit.element90 = getelementptr inbounds i8, i8* %arrayinit.element89, i32 1
-  store i8 0, i8* %arrayinit.element90, align 1
-  %arrayinit.element91 = getelementptr inbounds i8, i8* %arrayinit.element90, i32 1
-  store i8 15, i8* %arrayinit.element91, align 1
-  %arrayinit.element92 = getelementptr inbounds i8, i8* %arrayinit.element91, i32 1
-  store i8 11, i8* %arrayinit.element92, align 1
-  %arrayinit.element93 = getelementptr inbounds i8, i8* %arrayinit.element92, i32 1
-  store i8 32, i8* %arrayinit.element93, align 1
-  %arrayinit.element94 = getelementptr inbounds i8, i8* %arrayinit.element93, i32 1
-  %a20 = load i8, i8* %new_val, align 1
-  store i8 %a20, i8* %arrayinit.element94, align 1
-  %arrayinit.element95 = getelementptr inbounds i8, i8* %arrayinit.element94, i32 1
-  store i8 32, i8* %arrayinit.element95, align 1
-  %arrayinit.element96 = getelementptr inbounds i8, i8* %arrayinit.element95, i32 1
-  %a21 = load i8, i8* %simd, align 1
-  store i8 %a21, i8* %arrayinit.element96, align 1
-  %arrayinit.element97 = getelementptr inbounds i8, i8* %arrayinit.element96, i32 1
-  store i8 -3, i8* %arrayinit.element97, align 1
-  %arrayinit.element98 = getelementptr inbounds i8, i8* %arrayinit.element97, i32 1
-  store i8 1, i8* %arrayinit.element98, align 1
-  %arrayinit.element99 = getelementptr inbounds i8, i8* %arrayinit.element98, i32 1
-  store i8 1, i8* %arrayinit.element99, align 1
-  %arrayinit.element100 = getelementptr inbounds i8, i8* %arrayinit.element99, i32 1
-  store i8 92, i8* %arrayinit.element100, align 1
-  %arrayinit.element101 = getelementptr inbounds i8, i8* %arrayinit.element100, i32 1
-  store i8 4, i8* %arrayinit.element101, align 1
-  %arrayinit.element102 = getelementptr inbounds i8, i8* %arrayinit.element101, i32 1
-  store i8 64, i8* %arrayinit.element102, align 1
-  %arrayinit.element103 = getelementptr inbounds i8, i8* %arrayinit.element102, i32 1
-  store i8 65, i8* %arrayinit.element103, align 1
-  %arrayinit.element104 = getelementptr inbounds i8, i8* %arrayinit.element103, i32 1
-  store i8 0, i8* %arrayinit.element104, align 1
-  %arrayinit.element105 = getelementptr inbounds i8, i8* %arrayinit.element104, i32 1
-  store i8 15, i8* %arrayinit.element105, align 1
-  %arrayinit.element106 = getelementptr inbounds i8, i8* %arrayinit.element105, i32 1
-  store i8 11, i8* %arrayinit.element106, align 1
-  %arrayinit.element107 = getelementptr inbounds i8, i8* %arrayinit.element106, i32 1
-  store i8 32, i8* %arrayinit.element107, align 1
-  %arrayinit.element108 = getelementptr inbounds i8, i8* %arrayinit.element107, i32 1
-  %a22 = load i8, i8* %old_val, align 1
-  store i8 %a22, i8* %arrayinit.element108, align 1
-  %arrayinit.element109 = getelementptr inbounds i8, i8* %arrayinit.element108, i32 1
-  store i8 32, i8* %arrayinit.element109, align 1
-  %arrayinit.element110 = getelementptr inbounds i8, i8* %arrayinit.element109, i32 1
-  %a23 = load i8, i8* %simd, align 1
-  store i8 %a23, i8* %arrayinit.element110, align 1
-  %arrayinit.element111 = getelementptr inbounds i8, i8* %arrayinit.element110, i32 1
-  store i8 -3, i8* %arrayinit.element111, align 1
-  %arrayinit.element112 = getelementptr inbounds i8, i8* %arrayinit.element111, i32 1
-  store i8 1, i8* %arrayinit.element112, align 1
-  %arrayinit.element113 = getelementptr inbounds i8, i8* %arrayinit.element112, i32 1
-  store i8 2, i8* %arrayinit.element113, align 1
-  %arrayinit.element114 = getelementptr inbounds i8, i8* %arrayinit.element113, i32 1
-  store i8 92, i8* %arrayinit.element114, align 1
-  %arrayinit.element115 = getelementptr inbounds i8, i8* %arrayinit.element114, i32 1
-  store i8 4, i8* %arrayinit.element115, align 1
-  %arrayinit.element116 = getelementptr inbounds i8, i8* %arrayinit.element115, i32 1
-  store i8 64, i8* %arrayinit.element116, align 1
-  %arrayinit.element117 = getelementptr inbounds i8, i8* %arrayinit.element116, i32 1
-  store i8 65, i8* %arrayinit.element117, align 1
-  %arrayinit.element118 = getelementptr inbounds i8, i8* %arrayinit.element117, i32 1
-  store i8 0, i8* %arrayinit.element118, align 1
-  %arrayinit.element119 = getelementptr inbounds i8, i8* %arrayinit.element118, i32 1
-  store i8 15, i8* %arrayinit.element119, align 1
-  %arrayinit.element120 = getelementptr inbounds i8, i8* %arrayinit.element119, i32 1
-  store i8 11, i8* %arrayinit.element120, align 1
-  %arrayinit.element121 = getelementptr inbounds i8, i8* %arrayinit.element120, i32 1
-  store i8 32, i8* %arrayinit.element121, align 1
-  %arrayinit.element122 = getelementptr inbounds i8, i8* %arrayinit.element121, i32 1
-  %a24 = load i8, i8* %old_val, align 1
-  store i8 %a24, i8* %arrayinit.element122, align 1
-  %arrayinit.element123 = getelementptr inbounds i8, i8* %arrayinit.element122, i32 1
-  store i8 32, i8* %arrayinit.element123, align 1
-  %arrayinit.element124 = getelementptr inbounds i8, i8* %arrayinit.element123, i32 1
-  %a25 = load i8, i8* %simd, align 1
-  store i8 %a25, i8* %arrayinit.element124, align 1
-  %arrayinit.element125 = getelementptr inbounds i8, i8* %arrayinit.element124, i32 1
-  store i8 -3, i8* %arrayinit.element125, align 1
-  %arrayinit.element126 = getelementptr inbounds i8, i8* %arrayinit.element125, i32 1
-  store i8 1, i8* %arrayinit.element126, align 1
-  %arrayinit.element127 = getelementptr inbounds i8, i8* %arrayinit.element126, i32 1
-  store i8 3, i8* %arrayinit.element127, align 1
-  %arrayinit.element128 = getelementptr inbounds i8, i8* %arrayinit.element127, i32 1
-  store i8 92, i8* %arrayinit.element128, align 1
-  %arrayinit.element129 = getelementptr inbounds i8, i8* %arrayinit.element128, i32 1
-  store i8 4, i8* %arrayinit.element129, align 1
-  %arrayinit.element130 = getelementptr inbounds i8, i8* %arrayinit.element129, i32 1
-  store i8 64, i8* %arrayinit.element130, align 1
-  %arrayinit.element131 = getelementptr inbounds i8, i8* %arrayinit.element130, i32 1
-  store i8 65, i8* %arrayinit.element131, align 1
-  %arrayinit.element132 = getelementptr inbounds i8, i8* %arrayinit.element131, i32 1
-  store i8 0, i8* %arrayinit.element132, align 1
-  %arrayinit.element133 = getelementptr inbounds i8, i8* %arrayinit.element132, i32 1
-  store i8 15, i8* %arrayinit.element133, align 1
-  %arrayinit.element134 = getelementptr inbounds i8, i8* %arrayinit.element133, i32 1
-  store i8 11, i8* %arrayinit.element134, align 1
-  %arrayinit.element135 = getelementptr inbounds i8, i8* %arrayinit.element134, i32 1
-  store i8 32, i8* %arrayinit.element135, align 1
-  %arrayinit.element136 = getelementptr inbounds i8, i8* %arrayinit.element135, i32 1
-  %a26 = load i8, i8* %simd, align 1
-  store i8 %a26, i8* %arrayinit.element136, align 1
-  %arrayinit.element137 = getelementptr inbounds i8, i8* %arrayinit.element136, i32 1
-  store i8 32, i8* %arrayinit.element137, align 1
-  %arrayinit.element138 = getelementptr inbounds i8, i8* %arrayinit.element137, i32 1
-  %a27 = load i8, i8* %new_val, align 1
-  store i8 %a27, i8* %arrayinit.element138, align 1
-  %arrayinit.element139 = getelementptr inbounds i8, i8* %arrayinit.element138, i32 1
-  store i8 -3, i8* %arrayinit.element139, align 1
-  %arrayinit.element140 = getelementptr inbounds i8, i8* %arrayinit.element139, i32 1
-  store i8 2, i8* %arrayinit.element140, align 1
-  %arrayinit.element141 = getelementptr inbounds i8, i8* %arrayinit.element140, i32 1
-  store i8 2, i8* %arrayinit.element141, align 1
-  %arrayinit.element142 = getelementptr inbounds i8, i8* %arrayinit.element141, i32 1
-  store i8 33, i8* %arrayinit.element142, align 1
-  %arrayinit.element143 = getelementptr inbounds i8, i8* %arrayinit.element142, i32 1
-  %a28 = load i8, i8* %simd, align 1
-  store i8 %a28, i8* %arrayinit.element143, align 1
-  %arrayinit.element144 = getelementptr inbounds i8, i8* %arrayinit.element143, i32 1
-  store i8 32, i8* %arrayinit.element144, align 1
-  %arrayinit.element145 = getelementptr inbounds i8, i8* %arrayinit.element144, i32 1
-  %a29 = load i8, i8* %new_val, align 1
-  store i8 %a29, i8* %arrayinit.element145, align 1
-  %arrayinit.element146 = getelementptr inbounds i8, i8* %arrayinit.element145, i32 1
-  store i8 32, i8* %arrayinit.element146, align 1
-  %arrayinit.element147 = getelementptr inbounds i8, i8* %arrayinit.element146, i32 1
-  %a30 = load i8, i8* %simd, align 1
-  store i8 %a30, i8* %arrayinit.element147, align 1
-  %arrayinit.element148 = getelementptr inbounds i8, i8* %arrayinit.element147, i32 1
-  store i8 -3, i8* %arrayinit.element148, align 1
-  %arrayinit.element149 = getelementptr inbounds i8, i8* %arrayinit.element148, i32 1
-  store i8 1, i8* %arrayinit.element149, align 1
-  %arrayinit.element150 = getelementptr inbounds i8, i8* %arrayinit.element149, i32 1
-  store i8 0, i8* %arrayinit.element150, align 1
-  %arrayinit.element151 = getelementptr inbounds i8, i8* %arrayinit.element150, i32 1
-  store i8 92, i8* %arrayinit.element151, align 1
-  %arrayinit.element152 = getelementptr inbounds i8, i8* %arrayinit.element151, i32 1
-  store i8 4, i8* %arrayinit.element152, align 1
-  %arrayinit.element153 = getelementptr inbounds i8, i8* %arrayinit.element152, i32 1
-  store i8 64, i8* %arrayinit.element153, align 1
-  %arrayinit.element154 = getelementptr inbounds i8, i8* %arrayinit.element153, i32 1
-  store i8 65, i8* %arrayinit.element154, align 1
-  %arrayinit.element155 = getelementptr inbounds i8, i8* %arrayinit.element154, i32 1
-  store i8 0, i8* %arrayinit.element155, align 1
-  %arrayinit.element156 = getelementptr inbounds i8, i8* %arrayinit.element155, i32 1
-  store i8 15, i8* %arrayinit.element156, align 1
-  %arrayinit.element157 = getelementptr inbounds i8, i8* %arrayinit.element156, i32 1
-  store i8 11, i8* %arrayinit.element157, align 1
-  %arrayinit.element158 = getelementptr inbounds i8, i8* %arrayinit.element157, i32 1
-  store i8 32, i8* %arrayinit.element158, align 1
-  %arrayinit.element159 = getelementptr inbounds i8, i8* %arrayinit.element158, i32 1
-  %a31 = load i8, i8* %new_val, align 1
-  store i8 %a31, i8* %arrayinit.element159, align 1
-  %arrayinit.element160 = getelementptr inbounds i8, i8* %arrayinit.element159, i32 1
-  store i8 32, i8* %arrayinit.element160, align 1
-  %arrayinit.element161 = getelementptr inbounds i8, i8* %arrayinit.element160, i32 1
-  %a32 = load i8, i8* %simd, align 1
-  store i8 %a32, i8* %arrayinit.element161, align 1
-  %arrayinit.element162 = getelementptr inbounds i8, i8* %arrayinit.element161, i32 1
-  store i8 -3, i8* %arrayinit.element162, align 1
-  %arrayinit.element163 = getelementptr inbounds i8, i8* %arrayinit.element162, i32 1
-  store i8 1, i8* %arrayinit.element163, align 1
-  %arrayinit.element164 = getelementptr inbounds i8, i8* %arrayinit.element163, i32 1
-  store i8 1, i8* %arrayinit.element164, align 1
-  %arrayinit.element165 = getelementptr inbounds i8, i8* %arrayinit.element164, i32 1
-  store i8 92, i8* %arrayinit.element165, align 1
-  %arrayinit.element166 = getelementptr inbounds i8, i8* %arrayinit.element165, i32 1
-  store i8 4, i8* %arrayinit.element166, align 1
-  %arrayinit.element167 = getelementptr inbounds i8, i8* %arrayinit.element166, i32 1
-  store i8 64, i8* %arrayinit.element167, align 1
-  %arrayinit.element168 = getelementptr inbounds i8, i8* %arrayinit.element167, i32 1
-  store i8 65, i8* %arrayinit.element168, align 1
-  %arrayinit.element169 = getelementptr inbounds i8, i8* %arrayinit.element168, i32 1
-  store i8 0, i8* %arrayinit.element169, align 1
-  %arrayinit.element170 = getelementptr inbounds i8, i8* %arrayinit.element169, i32 1
-  store i8 15, i8* %arrayinit.element170, align 1
-  %arrayinit.element171 = getelementptr inbounds i8, i8* %arrayinit.element170, i32 1
-  store i8 11, i8* %arrayinit.element171, align 1
-  %arrayinit.element172 = getelementptr inbounds i8, i8* %arrayinit.element171, i32 1
-  store i8 32, i8* %arrayinit.element172, align 1
-  %arrayinit.element173 = getelementptr inbounds i8, i8* %arrayinit.element172, i32 1
-  %a33 = load i8, i8* %new_val, align 1
-  store i8 %a33, i8* %arrayinit.element173, align 1
-  %arrayinit.element174 = getelementptr inbounds i8, i8* %arrayinit.element173, i32 1
-  store i8 32, i8* %arrayinit.element174, align 1
-  %arrayinit.element175 = getelementptr inbounds i8, i8* %arrayinit.element174, i32 1
-  %a34 = load i8, i8* %simd, align 1
-  store i8 %a34, i8* %arrayinit.element175, align 1
-  %arrayinit.element176 = getelementptr inbounds i8, i8* %arrayinit.element175, i32 1
-  store i8 -3, i8* %arrayinit.element176, align 1
-  %arrayinit.element177 = getelementptr inbounds i8, i8* %arrayinit.element176, i32 1
-  store i8 1, i8* %arrayinit.element177, align 1
-  %arrayinit.element178 = getelementptr inbounds i8, i8* %arrayinit.element177, i32 1
-  store i8 2, i8* %arrayinit.element178, align 1
-  %arrayinit.element179 = getelementptr inbounds i8, i8* %arrayinit.element178, i32 1
-  store i8 92, i8* %arrayinit.element179, align 1
-  %arrayinit.element180 = getelementptr inbounds i8, i8* %arrayinit.element179, i32 1
-  store i8 4, i8* %arrayinit.element180, align 1
-  %arrayinit.element181 = getelementptr inbounds i8, i8* %arrayinit.element180, i32 1
-  store i8 64, i8* %arrayinit.element181, align 1
-  %arrayinit.element182 = getelementptr inbounds i8, i8* %arrayinit.element181, i32 1
-  store i8 65, i8* %arrayinit.element182, align 1
-  %arrayinit.element183 = getelementptr inbounds i8, i8* %arrayinit.element182, i32 1
-  store i8 0, i8* %arrayinit.element183, align 1
-  %arrayinit.element184 = getelementptr inbounds i8, i8* %arrayinit.element183, i32 1
-  store i8 15, i8* %arrayinit.element184, align 1
-  %arrayinit.element185 = getelementptr inbounds i8, i8* %arrayinit.element184, i32 1
-  store i8 11, i8* %arrayinit.element185, align 1
-  %arrayinit.element186 = getelementptr inbounds i8, i8* %arrayinit.element185, i32 1
-  store i8 32, i8* %arrayinit.element186, align 1
-  %arrayinit.element187 = getelementptr inbounds i8, i8* %arrayinit.element186, i32 1
-  %a35 = load i8, i8* %old_val, align 1
-  store i8 %a35, i8* %arrayinit.element187, align 1
-  %arrayinit.element188 = getelementptr inbounds i8, i8* %arrayinit.element187, i32 1
-  store i8 32, i8* %arrayinit.element188, align 1
-  %arrayinit.element189 = getelementptr inbounds i8, i8* %arrayinit.element188, i32 1
-  %a36 = load i8, i8* %simd, align 1
-  store i8 %a36, i8* %arrayinit.element189, align 1
-  %arrayinit.element190 = getelementptr inbounds i8, i8* %arrayinit.element189, i32 1
-  store i8 -3, i8* %arrayinit.element190, align 1
-  %arrayinit.element191 = getelementptr inbounds i8, i8* %arrayinit.element190, i32 1
-  store i8 1, i8* %arrayinit.element191, align 1
-  %arrayinit.element192 = getelementptr inbounds i8, i8* %arrayinit.element191, i32 1
-  store i8 3, i8* %arrayinit.element192, align 1
-  %arrayinit.element193 = getelementptr inbounds i8, i8* %arrayinit.element192, i32 1
-  store i8 92, i8* %arrayinit.element193, align 1
-  %arrayinit.element194 = getelementptr inbounds i8, i8* %arrayinit.element193, i32 1
-  store i8 4, i8* %arrayinit.element194, align 1
-  %arrayinit.element195 = getelementptr inbounds i8, i8* %arrayinit.element194, i32 1
-  store i8 64, i8* %arrayinit.element195, align 1
-  %arrayinit.element196 = getelementptr inbounds i8, i8* %arrayinit.element195, i32 1
-  store i8 65, i8* %arrayinit.element196, align 1
-  %arrayinit.element197 = getelementptr inbounds i8, i8* %arrayinit.element196, i32 1
-  store i8 0, i8* %arrayinit.element197, align 1
-  %arrayinit.element198 = getelementptr inbounds i8, i8* %arrayinit.element197, i32 1
-  store i8 15, i8* %arrayinit.element198, align 1
-  %arrayinit.element199 = getelementptr inbounds i8, i8* %arrayinit.element198, i32 1
-  store i8 11, i8* %arrayinit.element199, align 1
-  %arrayinit.element200 = getelementptr inbounds i8, i8* %arrayinit.element199, i32 1
-  store i8 32, i8* %arrayinit.element200, align 1
-  %arrayinit.element201 = getelementptr inbounds i8, i8* %arrayinit.element200, i32 1
-  %a37 = load i8, i8* %simd, align 1
-  store i8 %a37, i8* %arrayinit.element201, align 1
-  %arrayinit.element202 = getelementptr inbounds i8, i8* %arrayinit.element201, i32 1
-  store i8 32, i8* %arrayinit.element202, align 1
-  %arrayinit.element203 = getelementptr inbounds i8, i8* %arrayinit.element202, i32 1
-  %a38 = load i8, i8* %new_val, align 1
-  store i8 %a38, i8* %arrayinit.element203, align 1
-  %arrayinit.element204 = getelementptr inbounds i8, i8* %arrayinit.element203, i32 1
-  store i8 -3, i8* %arrayinit.element204, align 1
-  %arrayinit.element205 = getelementptr inbounds i8, i8* %arrayinit.element204, i32 1
-  store i8 2, i8* %arrayinit.element205, align 1
-  %arrayinit.element206 = getelementptr inbounds i8, i8* %arrayinit.element205, i32 1
-  store i8 3, i8* %arrayinit.element206, align 1
-  %arrayinit.element207 = getelementptr inbounds i8, i8* %arrayinit.element206, i32 1
-  store i8 33, i8* %arrayinit.element207, align 1
-  %arrayinit.element208 = getelementptr inbounds i8, i8* %arrayinit.element207, i32 1
-  %a39 = load i8, i8* %simd, align 1
-  store i8 %a39, i8* %arrayinit.element208, align 1
-  %arrayinit.element209 = getelementptr inbounds i8, i8* %arrayinit.element208, i32 1
-  store i8 32, i8* %arrayinit.element209, align 1
-  %arrayinit.element210 = getelementptr inbounds i8, i8* %arrayinit.element209, i32 1
-  %a40 = load i8, i8* %new_val, align 1
-  store i8 %a40, i8* %arrayinit.element210, align 1
-  %arrayinit.element211 = getelementptr inbounds i8, i8* %arrayinit.element210, i32 1
-  store i8 32, i8* %arrayinit.element211, align 1
-  %arrayinit.element212 = getelementptr inbounds i8, i8* %arrayinit.element211, i32 1
-  %a41 = load i8, i8* %simd, align 1
-  store i8 %a41, i8* %arrayinit.element212, align 1
-  %arrayinit.element213 = getelementptr inbounds i8, i8* %arrayinit.element212, i32 1
-  store i8 -3, i8* %arrayinit.element213, align 1
-  %arrayinit.element214 = getelementptr inbounds i8, i8* %arrayinit.element213, i32 1
-  store i8 1, i8* %arrayinit.element214, align 1
-  %arrayinit.element215 = getelementptr inbounds i8, i8* %arrayinit.element214, i32 1
-  store i8 0, i8* %arrayinit.element215, align 1
-  %arrayinit.element216 = getelementptr inbounds i8, i8* %arrayinit.element215, i32 1
-  store i8 92, i8* %arrayinit.element216, align 1
-  %arrayinit.element217 = getelementptr inbounds i8, i8* %arrayinit.element216, i32 1
-  store i8 4, i8* %arrayinit.element217, align 1
-  %arrayinit.element218 = getelementptr inbounds i8, i8* %arrayinit.element217, i32 1
-  store i8 64, i8* %arrayinit.element218, align 1
-  %arrayinit.element219 = getelementptr inbounds i8, i8* %arrayinit.element218, i32 1
-  store i8 65, i8* %arrayinit.element219, align 1
-  %arrayinit.element220 = getelementptr inbounds i8, i8* %arrayinit.element219, i32 1
-  store i8 0, i8* %arrayinit.element220, align 1
-  %arrayinit.element221 = getelementptr inbounds i8, i8* %arrayinit.element220, i32 1
-  store i8 15, i8* %arrayinit.element221, align 1
-  %arrayinit.element222 = getelementptr inbounds i8, i8* %arrayinit.element221, i32 1
-  store i8 11, i8* %arrayinit.element222, align 1
-  %arrayinit.element223 = getelementptr inbounds i8, i8* %arrayinit.element222, i32 1
-  store i8 32, i8* %arrayinit.element223, align 1
-  %arrayinit.element224 = getelementptr inbounds i8, i8* %arrayinit.element223, i32 1
-  %a42 = load i8, i8* %new_val, align 1
-  store i8 %a42, i8* %arrayinit.element224, align 1
-  %arrayinit.element225 = getelementptr inbounds i8, i8* %arrayinit.element224, i32 1
-  store i8 32, i8* %arrayinit.element225, align 1
-  %arrayinit.element226 = getelementptr inbounds i8, i8* %arrayinit.element225, i32 1
-  %a43 = load i8, i8* %simd, align 1
-  store i8 %a43, i8* %arrayinit.element226, align 1
-  %arrayinit.element227 = getelementptr inbounds i8, i8* %arrayinit.element226, i32 1
-  store i8 -3, i8* %arrayinit.element227, align 1
-  %arrayinit.element228 = getelementptr inbounds i8, i8* %arrayinit.element227, i32 1
-  store i8 1, i8* %arrayinit.element228, align 1
-  %arrayinit.element229 = getelementptr inbounds i8, i8* %arrayinit.element228, i32 1
-  store i8 1, i8* %arrayinit.element229, align 1
-  %arrayinit.element230 = getelementptr inbounds i8, i8* %arrayinit.element229, i32 1
-  store i8 92, i8* %arrayinit.element230, align 1
-  %arrayinit.element231 = getelementptr inbounds i8, i8* %arrayinit.element230, i32 1
-  store i8 4, i8* %arrayinit.element231, align 1
-  %arrayinit.element232 = getelementptr inbounds i8, i8* %arrayinit.element231, i32 1
-  store i8 64, i8* %arrayinit.element232, align 1
-  %arrayinit.element233 = getelementptr inbounds i8, i8* %arrayinit.element232, i32 1
-  store i8 65, i8* %arrayinit.element233, align 1
-  %arrayinit.element234 = getelementptr inbounds i8, i8* %arrayinit.element233, i32 1
-  store i8 0, i8* %arrayinit.element234, align 1
-  %arrayinit.element235 = getelementptr inbounds i8, i8* %arrayinit.element234, i32 1
-  store i8 15, i8* %arrayinit.element235, align 1
-  %arrayinit.element236 = getelementptr inbounds i8, i8* %arrayinit.element235, i32 1
-  store i8 11, i8* %arrayinit.element236, align 1
-  %arrayinit.element237 = getelementptr inbounds i8, i8* %arrayinit.element236, i32 1
-  store i8 32, i8* %arrayinit.element237, align 1
-  %arrayinit.element238 = getelementptr inbounds i8, i8* %arrayinit.element237, i32 1
-  %a44 = load i8, i8* %new_val, align 1
-  store i8 %a44, i8* %arrayinit.element238, align 1
-  %arrayinit.element239 = getelementptr inbounds i8, i8* %arrayinit.element238, i32 1
-  store i8 32, i8* %arrayinit.element239, align 1
-  %arrayinit.element240 = getelementptr inbounds i8, i8* %arrayinit.element239, i32 1
-  %a45 = load i8, i8* %simd, align 1
-  store i8 %a45, i8* %arrayinit.element240, align 1
-  %arrayinit.element241 = getelementptr inbounds i8, i8* %arrayinit.element240, i32 1
-  store i8 -3, i8* %arrayinit.element241, align 1
-  %arrayinit.element242 = getelementptr inbounds i8, i8* %arrayinit.element241, i32 1
-  store i8 1, i8* %arrayinit.element242, align 1
-  %arrayinit.element243 = getelementptr inbounds i8, i8* %arrayinit.element242, i32 1
-  store i8 2, i8* %arrayinit.element243, align 1
-  %arrayinit.element244 = getelementptr inbounds i8, i8* %arrayinit.element243, i32 1
-  store i8 92, i8* %arrayinit.element244, align 1
-  %arrayinit.element245 = getelementptr inbounds i8, i8* %arrayinit.element244, i32 1
-  store i8 4, i8* %arrayinit.element245, align 1
-  %arrayinit.element246 = getelementptr inbounds i8, i8* %arrayinit.element245, i32 1
-  store i8 64, i8* %arrayinit.element246, align 1
-  %arrayinit.element247 = getelementptr inbounds i8, i8* %arrayinit.element246, i32 1
-  store i8 65, i8* %arrayinit.element247, align 1
-  %arrayinit.element248 = getelementptr inbounds i8, i8* %arrayinit.element247, i32 1
-  store i8 0, i8* %arrayinit.element248, align 1
-  %arrayinit.element249 = getelementptr inbounds i8, i8* %arrayinit.element248, i32 1
-  store i8 15, i8* %arrayinit.element249, align 1
-  %arrayinit.element250 = getelementptr inbounds i8, i8* %arrayinit.element249, i32 1
-  store i8 11, i8* %arrayinit.element250, align 1
-  %arrayinit.element251 = getelementptr inbounds i8, i8* %arrayinit.element250, i32 1
-  store i8 32, i8* %arrayinit.element251, align 1
-  %arrayinit.element252 = getelementptr inbounds i8, i8* %arrayinit.element251, i32 1
-  %a46 = load i8, i8* %new_val, align 1
-  store i8 %a46, i8* %arrayinit.element252, align 1
-  %arrayinit.element253 = getelementptr inbounds i8, i8* %arrayinit.element252, i32 1
-  store i8 32, i8* %arrayinit.element253, align 1
-  %arrayinit.element254 = getelementptr inbounds i8, i8* %arrayinit.element253, i32 1
-  %a47 = load i8, i8* %simd, align 1
-  store i8 %a47, i8* %arrayinit.element254, align 1
-  %arrayinit.element255 = getelementptr inbounds i8, i8* %arrayinit.element254, i32 1
-  store i8 -3, i8* %arrayinit.element255, align 1
-  %arrayinit.element256 = getelementptr inbounds i8, i8* %arrayinit.element255, i32 1
-  store i8 1, i8* %arrayinit.element256, align 1
-  %arrayinit.element257 = getelementptr inbounds i8, i8* %arrayinit.element256, i32 1
-  store i8 3, i8* %arrayinit.element257, align 1
-  %arrayinit.element258 = getelementptr inbounds i8, i8* %arrayinit.element257, i32 1
-  store i8 92, i8* %arrayinit.element258, align 1
-  %arrayinit.element259 = getelementptr inbounds i8, i8* %arrayinit.element258, i32 1
-  store i8 4, i8* %arrayinit.element259, align 1
-  %arrayinit.element260 = getelementptr inbounds i8, i8* %arrayinit.element259, i32 1
-  store i8 64, i8* %arrayinit.element260, align 1
-  %arrayinit.element261 = getelementptr inbounds i8, i8* %arrayinit.element260, i32 1
-  store i8 65, i8* %arrayinit.element261, align 1
-  %arrayinit.element262 = getelementptr inbounds i8, i8* %arrayinit.element261, i32 1
-  store i8 0, i8* %arrayinit.element262, align 1
-  %arrayinit.element263 = getelementptr inbounds i8, i8* %arrayinit.element262, i32 1
-  store i8 15, i8* %arrayinit.element263, align 1
-  %arrayinit.element264 = getelementptr inbounds i8, i8* %arrayinit.element263, i32 1
-  store i8 11, i8* %arrayinit.element264, align 1
-  %arrayinit.element265 = getelementptr inbounds i8, i8* %arrayinit.element264, i32 1
-  store i8 65, i8* %arrayinit.element265, align 1
-  %arrayinit.element266 = getelementptr inbounds i8, i8* %arrayinit.element265, i32 1
-  store i8 1, i8* %arrayinit.element266, align 1
-  %arrayinit.element267 = getelementptr inbounds i8, i8* %arrayinit.element266, i32 1
-  store i8 15, i8* %arrayinit.element267, align 1
-  %arraydecay = getelementptr inbounds [269 x i8], [269 x i8]* %code, i32 0, i32 0
-  %arraydecay268 = getelementptr inbounds [269 x i8], [269 x i8]* %code, i32 0, i32 0
-  %add.ptr = getelementptr inbounds i8, i8* %arraydecay268, i32 269
-  call void @g(i8* %arraydecay, i8* %add.ptr)
+  store i8 32, ptr %code, align 1
+  %arrayinit.element = getelementptr inbounds i8, ptr %code, i32 1
+  %a2 = load i8, ptr %old_val, align 1
+  store i8 %a2, ptr %arrayinit.element, align 1
+  %arrayinit.element1 = getelementptr inbounds i8, ptr %arrayinit.element, i32 1
+  store i8 -3, ptr %arrayinit.element1, align 1
+  %arrayinit.element2 = getelementptr inbounds i8, ptr %arrayinit.element1, i32 1
+  store i8 0, ptr %arrayinit.element2, align 1
+  %arrayinit.element3 = getelementptr inbounds i8, ptr %arrayinit.element2, i32 1
+  store i8 33, ptr %arrayinit.element3, align 1
+  %arrayinit.element4 = getelementptr inbounds i8, ptr %arrayinit.element3, i32 1
+  %a3 = load i8, ptr %simd, align 1
+  store i8 %a3, ptr %arrayinit.element4, align 1
+  %arrayinit.element5 = getelementptr inbounds i8, ptr %arrayinit.element4, i32 1
+  store i8 32, ptr %arrayinit.element5, align 1
+  %arrayinit.element6 = getelementptr inbounds i8, ptr %arrayinit.element5, i32 1
+  %a4 = load i8, ptr %simd, align 1
+  store i8 %a4, ptr %arrayinit.element6, align 1
+  %arrayinit.element7 = getelementptr inbounds i8, ptr %arrayinit.element6, i32 1
+  store i8 32, ptr %arrayinit.element7, align 1
+  %arrayinit.element8 = getelementptr inbounds i8, ptr %arrayinit.element7, i32 1
+  %a5 = load i8, ptr %new_val, align 1
+  store i8 %a5, ptr %arrayinit.element8, align 1
+  %arrayinit.element9 = getelementptr inbounds i8, ptr %arrayinit.element8, i32 1
+  store i8 -3, ptr %arrayinit.element9, align 1
+  %arrayinit.element10 = getelementptr inbounds i8, ptr %arrayinit.element9, i32 1
+  store i8 2, ptr %arrayinit.element10, align 1
+  %arrayinit.element11 = getelementptr inbounds i8, ptr %arrayinit.element10, i32 1
+  store i8 0, ptr %arrayinit.element11, align 1
+  %arrayinit.element12 = getelementptr inbounds i8, ptr %arrayinit.element11, i32 1
+  store i8 33, ptr %arrayinit.element12, align 1
+  %arrayinit.element13 = getelementptr inbounds i8, ptr %arrayinit.element12, i32 1
+  %a6 = load i8, ptr %simd, align 1
+  store i8 %a6, ptr %arrayinit.element13, align 1
+  %arrayinit.element14 = getelementptr inbounds i8, ptr %arrayinit.element13, i32 1
+  store i8 32, ptr %arrayinit.element14, align 1
+  %arrayinit.element15 = getelementptr inbounds i8, ptr %arrayinit.element14, i32 1
+  %a7 = load i8, ptr %new_val, align 1
+  store i8 %a7, ptr %arrayinit.element15, align 1
+  %arrayinit.element16 = getelementptr inbounds i8, ptr %arrayinit.element15, i32 1
+  store i8 32, ptr %arrayinit.element16, align 1
+  %arrayinit.element17 = getelementptr inbounds i8, ptr %arrayinit.element16, i32 1
+  %a8 = load i8, ptr %simd, align 1
+  store i8 %a8, ptr %arrayinit.element17, align 1
+  %arrayinit.element18 = getelementptr inbounds i8, ptr %arrayinit.element17, i32 1
+  store i8 -3, ptr %arrayinit.element18, align 1
+  %arrayinit.element19 = getelementptr inbounds i8, ptr %arrayinit.element18, i32 1
+  store i8 1, ptr %arrayinit.element19, align 1
+  %arrayinit.element20 = getelementptr inbounds i8, ptr %arrayinit.element19, i32 1
+  store i8 0, ptr %arrayinit.element20, align 1
+  %arrayinit.element21 = getelementptr inbounds i8, ptr %arrayinit.element20, i32 1
+  store i8 92, ptr %arrayinit.element21, align 1
+  %arrayinit.element22 = getelementptr inbounds i8, ptr %arrayinit.element21, i32 1
+  store i8 4, ptr %arrayinit.element22, align 1
+  %arrayinit.element23 = getelementptr inbounds i8, ptr %arrayinit.element22, i32 1
+  store i8 64, ptr %arrayinit.element23, align 1
+  %arrayinit.element24 = getelementptr inbounds i8, ptr %arrayinit.element23, i32 1
+  store i8 65, ptr %arrayinit.element24, align 1
+  %arrayinit.element25 = getelementptr inbounds i8, ptr %arrayinit.element24, i32 1
+  store i8 0, ptr %arrayinit.element25, align 1
+  %arrayinit.element26 = getelementptr inbounds i8, ptr %arrayinit.element25, i32 1
+  store i8 15, ptr %arrayinit.element26, align 1
+  %arrayinit.element27 = getelementptr inbounds i8, ptr %arrayinit.element26, i32 1
+  store i8 11, ptr %arrayinit.element27, align 1
+  %arrayinit.element28 = getelementptr inbounds i8, ptr %arrayinit.element27, i32 1
+  store i8 32, ptr %arrayinit.element28, align 1
+  %arrayinit.element29 = getelementptr inbounds i8, ptr %arrayinit.element28, i32 1
+  %a9 = load i8, ptr %old_val, align 1
+  store i8 %a9, ptr %arrayinit.element29, align 1
+  %arrayinit.element30 = getelementptr inbounds i8, ptr %arrayinit.element29, i32 1
+  store i8 32, ptr %arrayinit.element30, align 1
+  %arrayinit.element31 = getelementptr inbounds i8, ptr %arrayinit.element30, i32 1
+  %a10 = load i8, ptr %simd, align 1
+  store i8 %a10, ptr %arrayinit.element31, align 1
+  %arrayinit.element32 = getelementptr inbounds i8, ptr %arrayinit.element31, i32 1
+  store i8 -3, ptr %arrayinit.element32, align 1
+  %arrayinit.element33 = getelementptr inbounds i8, ptr %arrayinit.element32, i32 1
+  store i8 1, ptr %arrayinit.element33, align 1
+  %arrayinit.element34 = getelementptr inbounds i8, ptr %arrayinit.element33, i32 1
+  store i8 1, ptr %arrayinit.element34, align 1
+  %arrayinit.element35 = getelementptr inbounds i8, ptr %arrayinit.element34, i32 1
+  store i8 92, ptr %arrayinit.element35, align 1
+  %arrayinit.element36 = getelementptr inbounds i8, ptr %arrayinit.element35, i32 1
+  store i8 4, ptr %arrayinit.element36, align 1
+  %arrayinit.element37 = getelementptr inbounds i8, ptr %arrayinit.element36, i32 1
+  store i8 64, ptr %arrayinit.element37, align 1
+  %arrayinit.element38 = getelementptr inbounds i8, ptr %arrayinit.element37, i32 1
+  store i8 65, ptr %arrayinit.element38, align 1
+  %arrayinit.element39 = getelementptr inbounds i8, ptr %arrayinit.element38, i32 1
+  store i8 0, ptr %arrayinit.element39, align 1
+  %arrayinit.element40 = getelementptr inbounds i8, ptr %arrayinit.element39, i32 1
+  store i8 15, ptr %arrayinit.element40, align 1
+  %arrayinit.element41 = getelementptr inbounds i8, ptr %arrayinit.element40, i32 1
+  store i8 11, ptr %arrayinit.element41, align 1
+  %arrayinit.element42 = getelementptr inbounds i8, ptr %arrayinit.element41, i32 1
+  store i8 32, ptr %arrayinit.element42, align 1
+  %arrayinit.element43 = getelementptr inbounds i8, ptr %arrayinit.element42, i32 1
+  %a11 = load i8, ptr %old_val, align 1
+  store i8 %a11, ptr %arrayinit.element43, align 1
+  %arrayinit.element44 = getelementptr inbounds i8, ptr %arrayinit.element43, i32 1
+  store i8 32, ptr %arrayinit.element44, align 1
+  %arrayinit.element45 = getelementptr inbounds i8, ptr %arrayinit.element44, i32 1
+  %a12 = load i8, ptr %simd, align 1
+  store i8 %a12, ptr %arrayinit.element45, align 1
+  %arrayinit.element46 = getelementptr inbounds i8, ptr %arrayinit.element45, i32 1
+  store i8 -3, ptr %arrayinit.element46, align 1
+  %arrayinit.element47 = getelementptr inbounds i8, ptr %arrayinit.element46, i32 1
+  store i8 1, ptr %arrayinit.element47, align 1
+  %arrayinit.element48 = getelementptr inbounds i8, ptr %arrayinit.element47, i32 1
+  store i8 2, ptr %arrayinit.element48, align 1
+  %arrayinit.element49 = getelementptr inbounds i8, ptr %arrayinit.element48, i32 1
+  store i8 92, ptr %arrayinit.element49, align 1
+  %arrayinit.element50 = getelementptr inbounds i8, ptr %arrayinit.element49, i32 1
+  store i8 4, ptr %arrayinit.element50, align 1
+  %arrayinit.element51 = getelementptr inbounds i8, ptr %arrayinit.element50, i32 1
+  store i8 64, ptr %arrayinit.element51, align 1
+  %arrayinit.element52 = getelementptr inbounds i8, ptr %arrayinit.element51, i32 1
+  store i8 65, ptr %arrayinit.element52, align 1
+  %arrayinit.element53 = getelementptr inbounds i8, ptr %arrayinit.element52, i32 1
+  store i8 0, ptr %arrayinit.element53, align 1
+  %arrayinit.element54 = getelementptr inbounds i8, ptr %arrayinit.element53, i32 1
+  store i8 15, ptr %arrayinit.element54, align 1
+  %arrayinit.element55 = getelementptr inbounds i8, ptr %arrayinit.element54, i32 1
+  store i8 11, ptr %arrayinit.element55, align 1
+  %arrayinit.element56 = getelementptr inbounds i8, ptr %arrayinit.element55, i32 1
+  store i8 32, ptr %arrayinit.element56, align 1
+  %arrayinit.element57 = getelementptr inbounds i8, ptr %arrayinit.element56, i32 1
+  %a13 = load i8, ptr %old_val, align 1
+  store i8 %a13, ptr %arrayinit.element57, align 1
+  %arrayinit.element58 = getelementptr inbounds i8, ptr %arrayinit.element57, i32 1
+  store i8 32, ptr %arrayinit.element58, align 1
+  %arrayinit.element59 = getelementptr inbounds i8, ptr %arrayinit.element58, i32 1
+  %a14 = load i8, ptr %simd, align 1
+  store i8 %a14, ptr %arrayinit.element59, align 1
+  %arrayinit.element60 = getelementptr inbounds i8, ptr %arrayinit.element59, i32 1
+  store i8 -3, ptr %arrayinit.element60, align 1
+  %arrayinit.element61 = getelementptr inbounds i8, ptr %arrayinit.element60, i32 1
+  store i8 1, ptr %arrayinit.element61, align 1
+  %arrayinit.element62 = getelementptr inbounds i8, ptr %arrayinit.element61, i32 1
+  store i8 3, ptr %arrayinit.element62, align 1
+  %arrayinit.element63 = getelementptr inbounds i8, ptr %arrayinit.element62, i32 1
+  store i8 92, ptr %arrayinit.element63, align 1
+  %arrayinit.element64 = getelementptr inbounds i8, ptr %arrayinit.element63, i32 1
+  store i8 4, ptr %arrayinit.element64, align 1
+  %arrayinit.element65 = getelementptr inbounds i8, ptr %arrayinit.element64, i32 1
+  store i8 64, ptr %arrayinit.element65, align 1
+  %arrayinit.element66 = getelementptr inbounds i8, ptr %arrayinit.element65, i32 1
+  store i8 65, ptr %arrayinit.element66, align 1
+  %arrayinit.element67 = getelementptr inbounds i8, ptr %arrayinit.element66, i32 1
+  store i8 0, ptr %arrayinit.element67, align 1
+  %arrayinit.element68 = getelementptr inbounds i8, ptr %arrayinit.element67, i32 1
+  store i8 15, ptr %arrayinit.element68, align 1
+  %arrayinit.element69 = getelementptr inbounds i8, ptr %arrayinit.element68, i32 1
+  store i8 11, ptr %arrayinit.element69, align 1
+  %arrayinit.element70 = getelementptr inbounds i8, ptr %arrayinit.element69, i32 1
+  store i8 32, ptr %arrayinit.element70, align 1
+  %arrayinit.element71 = getelementptr inbounds i8, ptr %arrayinit.element70, i32 1
+  %a15 = load i8, ptr %simd, align 1
+  store i8 %a15, ptr %arrayinit.element71, align 1
+  %arrayinit.element72 = getelementptr inbounds i8, ptr %arrayinit.element71, i32 1
+  store i8 32, ptr %arrayinit.element72, align 1
+  %arrayinit.element73 = getelementptr inbounds i8, ptr %arrayinit.element72, i32 1
+  %a16 = load i8, ptr %new_val, align 1
+  store i8 %a16, ptr %arrayinit.element73, align 1
+  %arrayinit.element74 = getelementptr inbounds i8, ptr %arrayinit.element73, i32 1
+  store i8 -3, ptr %arrayinit.element74, align 1
+  %arrayinit.element75 = getelementptr inbounds i8, ptr %arrayinit.element74, i32 1
+  store i8 2, ptr %arrayinit.element75, align 1
+  %arrayinit.element76 = getelementptr inbounds i8, ptr %arrayinit.element75, i32 1
+  store i8 1, ptr %arrayinit.element76, align 1
+  %arrayinit.element77 = getelementptr inbounds i8, ptr %arrayinit.element76, i32 1
+  store i8 33, ptr %arrayinit.element77, align 1
+  %arrayinit.element78 = getelementptr inbounds i8, ptr %arrayinit.element77, i32 1
+  %a17 = load i8, ptr %simd, align 1
+  store i8 %a17, ptr %arrayinit.element78, align 1
+  %arrayinit.element79 = getelementptr inbounds i8, ptr %arrayinit.element78, i32 1
+  store i8 32, ptr %arrayinit.element79, align 1
+  %arrayinit.element80 = getelementptr inbounds i8, ptr %arrayinit.element79, i32 1
+  %a18 = load i8, ptr %new_val, align 1
+  store i8 %a18, ptr %arrayinit.element80, align 1
+  %arrayinit.element81 = getelementptr inbounds i8, ptr %arrayinit.element80, i32 1
+  store i8 32, ptr %arrayinit.element81, align 1
+  %arrayinit.element82 = getelementptr inbounds i8, ptr %arrayinit.element81, i32 1
+  %a19 = load i8, ptr %simd, align 1
+  store i8 %a19, ptr %arrayinit.element82, align 1
+  %arrayinit.element83 = getelementptr inbounds i8, ptr %arrayinit.element82, i32 1
+  store i8 -3, ptr %arrayinit.element83, align 1
+  %arrayinit.element84 = getelementptr inbounds i8, ptr %arrayinit.element83, i32 1
+  store i8 1, ptr %arrayinit.element84, align 1
+  %arrayinit.element85 = getelementptr inbounds i8, ptr %arrayinit.element84, i32 1
+  store i8 0, ptr %arrayinit.element85, align 1
+  %arrayinit.element86 = getelementptr inbounds i8, ptr %arrayinit.element85, i32 1
+  store i8 92, ptr %arrayinit.element86, align 1
+  %arrayinit.element87 = getelementptr inbounds i8, ptr %arrayinit.element86, i32 1
+  store i8 4, ptr %arrayinit.element87, align 1
+  %arrayinit.element88 = getelementptr inbounds i8, ptr %arrayinit.element87, i32 1
+  store i8 64, ptr %arrayinit.element88, align 1
+  %arrayinit.element89 = getelementptr inbounds i8, ptr %arrayinit.element88, i32 1
+  store i8 65, ptr %arrayinit.element89, align 1
+  %arrayinit.element90 = getelementptr inbounds i8, ptr %arrayinit.element89, i32 1
+  store i8 0, ptr %arrayinit.element90, align 1
+  %arrayinit.element91 = getelementptr inbounds i8, ptr %arrayinit.element90, i32 1
+  store i8 15, ptr %arrayinit.element91, align 1
+  %arrayinit.element92 = getelementptr inbounds i8, ptr %arrayinit.element91, i32 1
+  store i8 11, ptr %arrayinit.element92, align 1
+  %arrayinit.element93 = getelementptr inbounds i8, ptr %arrayinit.element92, i32 1
+  store i8 32, ptr %arrayinit.element93, align 1
+  %arrayinit.element94 = getelementptr inbounds i8, ptr %arrayinit.element93, i32 1
+  %a20 = load i8, ptr %new_val, align 1
+  store i8 %a20, ptr %arrayinit.element94, align 1
+  %arrayinit.element95 = getelementptr inbounds i8, ptr %arrayinit.element94, i32 1
+  store i8 32, ptr %arrayinit.element95, align 1
+  %arrayinit.element96 = getelementptr inbounds i8, ptr %arrayinit.element95, i32 1
+  %a21 = load i8, ptr %simd, align 1
+  store i8 %a21, ptr %arrayinit.element96, align 1
+  %arrayinit.element97 = getelementptr inbounds i8, ptr %arrayinit.element96, i32 1
+  store i8 -3, ptr %arrayinit.element97, align 1
+  %arrayinit.element98 = getelementptr inbounds i8, ptr %arrayinit.element97, i32 1
+  store i8 1, ptr %arrayinit.element98, align 1
+  %arrayinit.element99 = getelementptr inbounds i8, ptr %arrayinit.element98, i32 1
+  store i8 1, ptr %arrayinit.element99, align 1
+  %arrayinit.element100 = getelementptr inbounds i8, ptr %arrayinit.element99, i32 1
+  store i8 92, ptr %arrayinit.element100, align 1
+  %arrayinit.element101 = getelementptr inbounds i8, ptr %arrayinit.element100, i32 1
+  store i8 4, ptr %arrayinit.element101, align 1
+  %arrayinit.element102 = getelementptr inbounds i8, ptr %arrayinit.element101, i32 1
+  store i8 64, ptr %arrayinit.element102, align 1
+  %arrayinit.element103 = getelementptr inbounds i8, ptr %arrayinit.element102, i32 1
+  store i8 65, ptr %arrayinit.element103, align 1
+  %arrayinit.element104 = getelementptr inbounds i8, ptr %arrayinit.element103, i32 1
+  store i8 0, ptr %arrayinit.element104, align 1
+  %arrayinit.element105 = getelementptr inbounds i8, ptr %arrayinit.element104, i32 1
+  store i8 15, ptr %arrayinit.element105, align 1
+  %arrayinit.element106 = getelementptr inbounds i8, ptr %arrayinit.element105, i32 1
+  store i8 11, ptr %arrayinit.element106, align 1
+  %arrayinit.element107 = getelementptr inbounds i8, ptr %arrayinit.element106, i32 1
+  store i8 32, ptr %arrayinit.element107, align 1
+  %arrayinit.element108 = getelementptr inbounds i8, ptr %arrayinit.element107, i32 1
+  %a22 = load i8, ptr %old_val, align 1
+  store i8 %a22, ptr %arrayinit.element108, align 1
+  %arrayinit.element109 = getelementptr inbounds i8, ptr %arrayinit.element108, i32 1
+  store i8 32, ptr %arrayinit.element109, align 1
+  %arrayinit.element110 = getelementptr inbounds i8, ptr %arrayinit.element109, i32 1
+  %a23 = load i8, ptr %simd, align 1
+  store i8 %a23, ptr %arrayinit.element110, align 1
+  %arrayinit.element111 = getelementptr inbounds i8, ptr %arrayinit.element110, i32 1
+  store i8 -3, ptr %arrayinit.element111, align 1
+  %arrayinit.element112 = getelementptr inbounds i8, ptr %arrayinit.element111, i32 1
+  store i8 1, ptr %arrayinit.element112, align 1
+  %arrayinit.element113 = getelementptr inbounds i8, ptr %arrayinit.element112, i32 1
+  store i8 2, ptr %arrayinit.element113, align 1
+  %arrayinit.element114 = getelementptr inbounds i8, ptr %arrayinit.element113, i32 1
+  store i8 92, ptr %arrayinit.element114, align 1
+  %arrayinit.element115 = getelementptr inbounds i8, ptr %arrayinit.element114, i32 1
+  store i8 4, ptr %arrayinit.element115, align 1
+  %arrayinit.element116 = getelementptr inbounds i8, ptr %arrayinit.element115, i32 1
+  store i8 64, ptr %arrayinit.element116, align 1
+  %arrayinit.element117 = getelementptr inbounds i8, ptr %arrayinit.element116, i32 1
+  store i8 65, ptr %arrayinit.element117, align 1
+  %arrayinit.element118 = getelementptr inbounds i8, ptr %arrayinit.element117, i32 1
+  store i8 0, ptr %arrayinit.element118, align 1
+  %arrayinit.element119 = getelementptr inbounds i8, ptr %arrayinit.element118, i32 1
+  store i8 15, ptr %arrayinit.element119, align 1
+  %arrayinit.element120 = getelementptr inbounds i8, ptr %arrayinit.element119, i32 1
+  store i8 11, ptr %arrayinit.element120, align 1
+  %arrayinit.element121 = getelementptr inbounds i8, ptr %arrayinit.element120, i32 1
+  store i8 32, ptr %arrayinit.element121, align 1
+  %arrayinit.element122 = getelementptr inbounds i8, ptr %arrayinit.element121, i32 1
+  %a24 = load i8, ptr %old_val, align 1
+  store i8 %a24, ptr %arrayinit.element122, align 1
+  %arrayinit.element123 = getelementptr inbounds i8, ptr %arrayinit.element122, i32 1
+  store i8 32, ptr %arrayinit.element123, align 1
+  %arrayinit.element124 = getelementptr inbounds i8, ptr %arrayinit.element123, i32 1
+  %a25 = load i8, ptr %simd, align 1
+  store i8 %a25, ptr %arrayinit.element124, align 1
+  %arrayinit.element125 = getelementptr inbounds i8, ptr %arrayinit.element124, i32 1
+  store i8 -3, ptr %arrayinit.element125, align 1
+  %arrayinit.element126 = getelementptr inbounds i8, ptr %arrayinit.element125, i32 1
+  store i8 1, ptr %arrayinit.element126, align 1
+  %arrayinit.element127 = getelementptr inbounds i8, ptr %arrayinit.element126, i32 1
+  store i8 3, ptr %arrayinit.element127, align 1
+  %arrayinit.element128 = getelementptr inbounds i8, ptr %arrayinit.element127, i32 1
+  store i8 92, ptr %arrayinit.element128, align 1
+  %arrayinit.element129 = getelementptr inbounds i8, ptr %arrayinit.element128, i32 1
+  store i8 4, ptr %arrayinit.element129, align 1
+  %arrayinit.element130 = getelementptr inbounds i8, ptr %arrayinit.element129, i32 1
+  store i8 64, ptr %arrayinit.element130, align 1
+  %arrayinit.element131 = getelementptr inbounds i8, ptr %arrayinit.element130, i32 1
+  store i8 65, ptr %arrayinit.element131, align 1
+  %arrayinit.element132 = getelementptr inbounds i8, ptr %arrayinit.element131, i32 1
+  store i8 0, ptr %arrayinit.element132, align 1
+  %arrayinit.element133 = getelementptr inbounds i8, ptr %arrayinit.element132, i32 1
+  store i8 15, ptr %arrayinit.element133, align 1
+  %arrayinit.element134 = getelementptr inbounds i8, ptr %arrayinit.element133, i32 1
+  store i8 11, ptr %arrayinit.element134, align 1
+  %arrayinit.element135 = getelementptr inbounds i8, ptr %arrayinit.element134, i32 1
+  store i8 32, ptr %arrayinit.element135, align 1
+  %arrayinit.element136 = getelementptr inbounds i8, ptr %arrayinit.element135, i32 1
+  %a26 = load i8, ptr %simd, align 1
+  store i8 %a26, ptr %arrayinit.element136, align 1
+  %arrayinit.element137 = getelementptr inbounds i8, ptr %arrayinit.element136, i32 1
+  store i8 32, ptr %arrayinit.element137, align 1
+  %arrayinit.element138 = getelementptr inbounds i8, ptr %arrayinit.element137, i32 1
+  %a27 = load i8, ptr %new_val, align 1
+  store i8 %a27, ptr %arrayinit.element138, align 1
+  %arrayinit.element139 = getelementptr inbounds i8, ptr %arrayinit.element138, i32 1
+  store i8 -3, ptr %arrayinit.element139, align 1
+  %arrayinit.element140 = getelementptr inbounds i8, ptr %arrayinit.element139, i32 1
+  store i8 2, ptr %arrayinit.element140, align 1
+  %arrayinit.element141 = getelementptr inbounds i8, ptr %arrayinit.element140, i32 1
+  store i8 2, ptr %arrayinit.element141, align 1
+  %arrayinit.element142 = getelementptr inbounds i8, ptr %arrayinit.element141, i32 1
+  store i8 33, ptr %arrayinit.element142, align 1
+  %arrayinit.element143 = getelementptr inbounds i8, ptr %arrayinit.element142, i32 1
+  %a28 = load i8, ptr %simd, align 1
+  store i8 %a28, ptr %arrayinit.element143, align 1
+  %arrayinit.element144 = getelementptr inbounds i8, ptr %arrayinit.element143, i32 1
+  store i8 32, ptr %arrayinit.element144, align 1
+  %arrayinit.element145 = getelementptr inbounds i8, ptr %arrayinit.element144, i32 1
+  %a29 = load i8, ptr %new_val, align 1
+  store i8 %a29, ptr %arrayinit.element145, align 1
+  %arrayinit.element146 = getelementptr inbounds i8, ptr %arrayinit.element145, i32 1
+  store i8 32, ptr %arrayinit.element146, align 1
+  %arrayinit.element147 = getelementptr inbounds i8, ptr %arrayinit.element146, i32 1
+  %a30 = load i8, ptr %simd, align 1
+  store i8 %a30, ptr %arrayinit.element147, align 1
+  %arrayinit.element148 = getelementptr inbounds i8, ptr %arrayinit.element147, i32 1
+  store i8 -3, ptr %arrayinit.element148, align 1
+  %arrayinit.element149 = getelementptr inbounds i8, ptr %arrayinit.element148, i32 1
+  store i8 1, ptr %arrayinit.element149, align 1
+  %arrayinit.element150 = getelementptr inbounds i8, ptr %arrayinit.element149, i32 1
+  store i8 0, ptr %arrayinit.element150, align 1
+  %arrayinit.element151 = getelementptr inbounds i8, ptr %arrayinit.element150, i32 1
+  store i8 92, ptr %arrayinit.element151, align 1
+  %arrayinit.element152 = getelementptr inbounds i8, ptr %arrayinit.element151, i32 1
+  store i8 4, ptr %arrayinit.element152, align 1
+  %arrayinit.element153 = getelementptr inbounds i8, ptr %arrayinit.element152, i32 1
+  store i8 64, ptr %arrayinit.element153, align 1
+  %arrayinit.element154 = getelementptr inbounds i8, ptr %arrayinit.element153, i32 1
+  store i8 65, ptr %arrayinit.element154, align 1
+  %arrayinit.element155 = getelementptr inbounds i8, ptr %arrayinit.element154, i32 1
+  store i8 0, ptr %arrayinit.element155, align 1
+  %arrayinit.element156 = getelementptr inbounds i8, ptr %arrayinit.element155, i32 1
+  store i8 15, ptr %arrayinit.element156, align 1
+  %arrayinit.element157 = getelementptr inbounds i8, ptr %arrayinit.element156, i32 1
+  store i8 11, ptr %arrayinit.element157, align 1
+  %arrayinit.element158 = getelementptr inbounds i8, ptr %arrayinit.element157, i32 1
+  store i8 32, ptr %arrayinit.element158, align 1
+  %arrayinit.element159 = getelementptr inbounds i8, ptr %arrayinit.element158, i32 1
+  %a31 = load i8, ptr %new_val, align 1
+  store i8 %a31, ptr %arrayinit.element159, align 1
+  %arrayinit.element160 = getelementptr inbounds i8, ptr %arrayinit.element159, i32 1
+  store i8 32, ptr %arrayinit.element160, align 1
+  %arrayinit.element161 = getelementptr inbounds i8, ptr %arrayinit.element160, i32 1
+  %a32 = load i8, ptr %simd, align 1
+  store i8 %a32, ptr %arrayinit.element161, align 1
+  %arrayinit.element162 = getelementptr inbounds i8, ptr %arrayinit.element161, i32 1
+  store i8 -3, ptr %arrayinit.element162, align 1
+  %arrayinit.element163 = getelementptr inbounds i8, ptr %arrayinit.element162, i32 1
+  store i8 1, ptr %arrayinit.element163, align 1
+  %arrayinit.element164 = getelementptr inbounds i8, ptr %arrayinit.element163, i32 1
+  store i8 1, ptr %arrayinit.element164, align 1
+  %arrayinit.element165 = getelementptr inbounds i8, ptr %arrayinit.element164, i32 1
+  store i8 92, ptr %arrayinit.element165, align 1
+  %arrayinit.element166 = getelementptr inbounds i8, ptr %arrayinit.element165, i32 1
+  store i8 4, ptr %arrayinit.element166, align 1
+  %arrayinit.element167 = getelementptr inbounds i8, ptr %arrayinit.element166, i32 1
+  store i8 64, ptr %arrayinit.element167, align 1
+  %arrayinit.element168 = getelementptr inbounds i8, ptr %arrayinit.element167, i32 1
+  store i8 65, ptr %arrayinit.element168, align 1
+  %arrayinit.element169 = getelementptr inbounds i8, ptr %arrayinit.element168, i32 1
+  store i8 0, ptr %arrayinit.element169, align 1
+  %arrayinit.element170 = getelementptr inbounds i8, ptr %arrayinit.element169, i32 1
+  store i8 15, ptr %arrayinit.element170, align 1
+  %arrayinit.element171 = getelementptr inbounds i8, ptr %arrayinit.element170, i32 1
+  store i8 11, ptr %arrayinit.element171, align 1
+  %arrayinit.element172 = getelementptr inbounds i8, ptr %arrayinit.element171, i32 1
+  store i8 32, ptr %arrayinit.element172, align 1
+  %arrayinit.element173 = getelementptr inbounds i8, ptr %arrayinit.element172, i32 1
+  %a33 = load i8, ptr %new_val, align 1
+  store i8 %a33, ptr %arrayinit.element173, align 1
+  %arrayinit.element174 = getelementptr inbounds i8, ptr %arrayinit.element173, i32 1
+  store i8 32, ptr %arrayinit.element174, align 1
+  %arrayinit.element175 = getelementptr inbounds i8, ptr %arrayinit.element174, i32 1
+  %a34 = load i8, ptr %simd, align 1
+  store i8 %a34, ptr %arrayinit.element175, align 1
+  %arrayinit.element176 = getelementptr inbounds i8, ptr %arrayinit.element175, i32 1
+  store i8 -3, ptr %arrayinit.element176, align 1
+  %arrayinit.element177 = getelementptr inbounds i8, ptr %arrayinit.element176, i32 1
+  store i8 1, ptr %arrayinit.element177, align 1
+  %arrayinit.element178 = getelementptr inbounds i8, ptr %arrayinit.element177, i32 1
+  store i8 2, ptr %arrayinit.element178, align 1
+  %arrayinit.element179 = getelementptr inbounds i8, ptr %arrayinit.element178, i32 1
+  store i8 92, ptr %arrayinit.element179, align 1
+  %arrayinit.element180 = getelementptr inbounds i8, ptr %arrayinit.element179, i32 1
+  store i8 4, ptr %arrayinit.element180, align 1
+  %arrayinit.element181 = getelementptr inbounds i8, ptr %arrayinit.element180, i32 1
+  store i8 64, ptr %arrayinit.element181, align 1
+  %arrayinit.element182 = getelementptr inbounds i8, ptr %arrayinit.element181, i32 1
+  store i8 65, ptr %arrayinit.element182, align 1
+  %arrayinit.element183 = getelementptr inbounds i8, ptr %arrayinit.element182, i32 1
+  store i8 0, ptr %arrayinit.element183, align 1
+  %arrayinit.element184 = getelementptr inbounds i8, ptr %arrayinit.element183, i32 1
+  store i8 15, ptr %arrayinit.element184, align 1
+  %arrayinit.element185 = getelementptr inbounds i8, ptr %arrayinit.element184, i32 1
+  store i8 11, ptr %arrayinit.element185, align 1
+  %arrayinit.element186 = getelementptr inbounds i8, ptr %arrayinit.element185, i32 1
+  store i8 32, ptr %arrayinit.element186, align 1
+  %arrayinit.element187 = getelementptr inbounds i8, ptr %arrayinit.element186, i32 1
+  %a35 = load i8, ptr %old_val, align 1
+  store i8 %a35, ptr %arrayinit.element187, align 1
+  %arrayinit.element188 = getelementptr inbounds i8, ptr %arrayinit.element187, i32 1
+  store i8 32, ptr %arrayinit.element188, align 1
+  %arrayinit.element189 = getelementptr inbounds i8, ptr %arrayinit.element188, i32 1
+  %a36 = load i8, ptr %simd, align 1
+  store i8 %a36, ptr %arrayinit.element189, align 1
+  %arrayinit.element190 = getelementptr inbounds i8, ptr %arrayinit.element189, i32 1
+  store i8 -3, ptr %arrayinit.element190, align 1
+  %arrayinit.element191 = getelementptr inbounds i8, ptr %arrayinit.element190, i32 1
+  store i8 1, ptr %arrayinit.element191, align 1
+  %arrayinit.element192 = getelementptr inbounds i8, ptr %arrayinit.element191, i32 1
+  store i8 3, ptr %arrayinit.element192, align 1
+  %arrayinit.element193 = getelementptr inbounds i8, ptr %arrayinit.element192, i32 1
+  store i8 92, ptr %arrayinit.element193, align 1
+  %arrayinit.element194 = getelementptr inbounds i8, ptr %arrayinit.element193, i32 1
+  store i8 4, ptr %arrayinit.element194, align 1
+  %arrayinit.element195 = getelementptr inbounds i8, ptr %arrayinit.element194, i32 1
+  store i8 64, ptr %arrayinit.element195, align 1
+  %arrayinit.element196 = getelementptr inbounds i8, ptr %arrayinit.element195, i32 1
+  store i8 65, ptr %arrayinit.element196, align 1
+  %arrayinit.element197 = getelementptr inbounds i8, ptr %arrayinit.element196, i32 1
+  store i8 0, ptr %arrayinit.element197, align 1
+  %arrayinit.element198 = getelementptr inbounds i8, ptr %arrayinit.element197, i32 1
+  store i8 15, ptr %arrayinit.element198, align 1
+  %arrayinit.element199 = getelementptr inbounds i8, ptr %arrayinit.element198, i32 1
+  store i8 11, ptr %arrayinit.element199, align 1
+  %arrayinit.element200 = getelementptr inbounds i8, ptr %arrayinit.element199, i32 1
+  store i8 32, ptr %arrayinit.element200, align 1
+  %arrayinit.element201 = getelementptr inbounds i8, ptr %arrayinit.element200, i32 1
+  %a37 = load i8, ptr %simd, align 1
+  store i8 %a37, ptr %arrayinit.element201, align 1
+  %arrayinit.element202 = getelementptr inbounds i8, ptr %arrayinit.element201, i32 1
+  store i8 32, ptr %arrayinit.element202, align 1
+  %arrayinit.element203 = getelementptr inbounds i8, ptr %arrayinit.element202, i32 1
+  %a38 = load i8, ptr %new_val, align 1
+  store i8 %a38, ptr %arrayinit.element203, align 1
+  %arrayinit.element204 = getelementptr inbounds i8, ptr %arrayinit.element203, i32 1
+  store i8 -3, ptr %arrayinit.element204, align 1
+  %arrayinit.element205 = getelementptr inbounds i8, ptr %arrayinit.element204, i32 1
+  store i8 2, ptr %arrayinit.element205, align 1
+  %arrayinit.element206 = getelementptr inbounds i8, ptr %arrayinit.element205, i32 1
+  store i8 3, ptr %arrayinit.element206, align 1
+  %arrayinit.element207 = getelementptr inbounds i8, ptr %arrayinit.element206, i32 1
+  store i8 33, ptr %arrayinit.element207, align 1
+  %arrayinit.element208 = getelementptr inbounds i8, ptr %arrayinit.element207, i32 1
+  %a39 = load i8, ptr %simd, align 1
+  store i8 %a39, ptr %arrayinit.element208, align 1
+  %arrayinit.element209 = getelementptr inbounds i8, ptr %arrayinit.element208, i32 1
+  store i8 32, ptr %arrayinit.element209, align 1
+  %arrayinit.element210 = getelementptr inbounds i8, ptr %arrayinit.element209, i32 1
+  %a40 = load i8, ptr %new_val, align 1
+  store i8 %a40, ptr %arrayinit.element210, align 1
+  %arrayinit.element211 = getelementptr inbounds i8, ptr %arrayinit.element210, i32 1
+  store i8 32, ptr %arrayinit.element211, align 1
+  %arrayinit.element212 = getelementptr inbounds i8, ptr %arrayinit.element211, i32 1
+  %a41 = load i8, ptr %simd, align 1
+  store i8 %a41, ptr %arrayinit.element212, align 1
+  %arrayinit.element213 = getelementptr inbounds i8, ptr %arrayinit.element212, i32 1
+  store i8 -3, ptr %arrayinit.element213, align 1
+  %arrayinit.element214 = getelementptr inbounds i8, ptr %arrayinit.element213, i32 1
+  store i8 1, ptr %arrayinit.element214, align 1
+  %arrayinit.element215 = getelementptr inbounds i8, ptr %arrayinit.element214, i32 1
+  store i8 0, ptr %arrayinit.element215, align 1
+  %arrayinit.element216 = getelementptr inbounds i8, ptr %arrayinit.element215, i32 1
+  store i8 92, ptr %arrayinit.element216, align 1
+  %arrayinit.element217 = getelementptr inbounds i8, ptr %arrayinit.element216, i32 1
+  store i8 4, ptr %arrayinit.element217, align 1
+  %arrayinit.element218 = getelementptr inbounds i8, ptr %arrayinit.element217, i32 1
+  store i8 64, ptr %arrayinit.element218, align 1
+  %arrayinit.element219 = getelementptr inbounds i8, ptr %arrayinit.element218, i32 1
+  store i8 65, ptr %arrayinit.element219, align 1
+  %arrayinit.element220 = getelementptr inbounds i8, ptr %arrayinit.element219, i32 1
+  store i8 0, ptr %arrayinit.element220, align 1
+  %arrayinit.element221 = getelementptr inbounds i8, ptr %arrayinit.element220, i32 1
+  store i8 15, ptr %arrayinit.element221, align 1
+  %arrayinit.element222 = getelementptr inbounds i8, ptr %arrayinit.element221, i32 1
+  store i8 11, ptr %arrayinit.element222, align 1
+  %arrayinit.element223 = getelementptr inbounds i8, ptr %arrayinit.element222, i32 1
+  store i8 32, ptr %arrayinit.element223, align 1
+  %arrayinit.element224 = getelementptr inbounds i8, ptr %arrayinit.element223, i32 1
+  %a42 = load i8, ptr %new_val, align 1
+  store i8 %a42, ptr %arrayinit.element224, align 1
+  %arrayinit.element225 = getelementptr inbounds i8, ptr %arrayinit.element224, i32 1
+  store i8 32, ptr %arrayinit.element225, align 1
+  %arrayinit.element226 = getelementptr inbounds i8, ptr %arrayinit.element225, i32 1
+  %a43 = load i8, ptr %simd, align 1
+  store i8 %a43, ptr %arrayinit.element226, align 1
+  %arrayinit.element227 = getelementptr inbounds i8, ptr %arrayinit.element226, i32 1
+  store i8 -3, ptr %arrayinit.element227, align 1
+  %arrayinit.element228 = getelementptr inbounds i8, ptr %arrayinit.element227, i32 1
+  store i8 1, ptr %arrayinit.element228, align 1
+  %arrayinit.element229 = getelementptr inbounds i8, ptr %arrayinit.element228, i32 1
+  store i8 1, ptr %arrayinit.element229, align 1
+  %arrayinit.element230 = getelementptr inbounds i8, ptr %arrayinit.element229, i32 1
+  store i8 92, ptr %arrayinit.element230, align 1
+  %arrayinit.element231 = getelementptr inbounds i8, ptr %arrayinit.element230, i32 1
+  store i8 4, ptr %arrayinit.element231, align 1
+  %arrayinit.element232 = getelementptr inbounds i8, ptr %arrayinit.element231, i32 1
+  store i8 64, ptr %arrayinit.element232, align 1
+  %arrayinit.element233 = getelementptr inbounds i8, ptr %arrayinit.element232, i32 1
+  store i8 65, ptr %arrayinit.element233, align 1
+  %arrayinit.element234 = getelementptr inbounds i8, ptr %arrayinit.element233, i32 1
+  store i8 0, ptr %arrayinit.element234, align 1
+  %arrayinit.element235 = getelementptr inbounds i8, ptr %arrayinit.element234, i32 1
+  store i8 15, ptr %arrayinit.element235, align 1
+  %arrayinit.element236 = getelementptr inbounds i8, ptr %arrayinit.element235, i32 1
+  store i8 11, ptr %arrayinit.element236, align 1
+  %arrayinit.element237 = getelementptr inbounds i8, ptr %arrayinit.element236, i32 1
+  store i8 32, ptr %arrayinit.element237, align 1
+  %arrayinit.element238 = getelementptr inbounds i8, ptr %arrayinit.element237, i32 1
+  %a44 = load i8, ptr %new_val, align 1
+  store i8 %a44, ptr %arrayinit.element238, align 1
+  %arrayinit.element239 = getelementptr inbounds i8, ptr %arrayinit.element238, i32 1
+  store i8 32, ptr %arrayinit.element239, align 1
+  %arrayinit.element240 = getelementptr inbounds i8, ptr %arrayinit.element239, i32 1
+  %a45 = load i8, ptr %simd, align 1
+  store i8 %a45, ptr %arrayinit.element240, align 1
+  %arrayinit.element241 = getelementptr inbounds i8, ptr %arrayinit.element240, i32 1
+  store i8 -3, ptr %arrayinit.element241, align 1
+  %arrayinit.element242 = getelementptr inbounds i8, ptr %arrayinit.element241, i32 1
+  store i8 1, ptr %arrayinit.element242, align 1
+  %arrayinit.element243 = getelementptr inbounds i8, ptr %arrayinit.element242, i32 1
+  store i8 2, ptr %arrayinit.element243, align 1
+  %arrayinit.element244 = getelementptr inbounds i8, ptr %arrayinit.element243, i32 1
+  store i8 92, ptr %arrayinit.element244, align 1
+  %arrayinit.element245 = getelementptr inbounds i8, ptr %arrayinit.element244, i32 1
+  store i8 4, ptr %arrayinit.element245, align 1
+  %arrayinit.element246 = getelementptr inbounds i8, ptr %arrayinit.element245, i32 1
+  store i8 64, ptr %arrayinit.element246, align 1
+  %arrayinit.element247 = getelementptr inbounds i8, ptr %arrayinit.element246, i32 1
+  store i8 65, ptr %arrayinit.element247, align 1
+  %arrayinit.element248 = getelementptr inbounds i8, ptr %arrayinit.element247, i32 1
+  store i8 0, ptr %arrayinit.element248, align 1
+  %arrayinit.element249 = getelementptr inbounds i8, ptr %arrayinit.element248, i32 1
+  store i8 15, ptr %arrayinit.element249, align 1
+  %arrayinit.element250 = getelementptr inbounds i8, ptr %arrayinit.element249, i32 1
+  store i8 11, ptr %arrayinit.element250, align 1
+  %arrayinit.element251 = getelementptr inbounds i8, ptr %arrayinit.element250, i32 1
+  store i8 32, ptr %arrayinit.element251, align 1
+  %arrayinit.element252 = getelementptr inbounds i8, ptr %arrayinit.element251, i32 1
+  %a46 = load i8, ptr %new_val, align 1
+  store i8 %a46, ptr %arrayinit.element252, align 1
+  %arrayinit.element253 = getelementptr inbounds i8, ptr %arrayinit.element252, i32 1
+  store i8 32, ptr %arrayinit.element253, align 1
+  %arrayinit.element254 = getelementptr inbounds i8, ptr %arrayinit.element253, i32 1
+  %a47 = load i8, ptr %simd, align 1
+  store i8 %a47, ptr %arrayinit.element254, align 1
+  %arrayinit.element255 = getelementptr inbounds i8, ptr %arrayinit.element254, i32 1
+  store i8 -3, ptr %arrayinit.element255, align 1
+  %arrayinit.element256 = getelementptr inbounds i8, ptr %arrayinit.element255, i32 1
+  store i8 1, ptr %arrayinit.element256, align 1
+  %arrayinit.element257 = getelementptr inbounds i8, ptr %arrayinit.element256, i32 1
+  store i8 3, ptr %arrayinit.element257, align 1
+  %arrayinit.element258 = getelementptr inbounds i8, ptr %arrayinit.element257, i32 1
+  store i8 92, ptr %arrayinit.element258, align 1
+  %arrayinit.element259 = getelementptr inbounds i8, ptr %arrayinit.element258, i32 1
+  store i8 4, ptr %arrayinit.element259, align 1
+  %arrayinit.element260 = getelementptr inbounds i8, ptr %arrayinit.element259, i32 1
+  store i8 64, ptr %arrayinit.element260, align 1
+  %arrayinit.element261 = getelementptr inbounds i8, ptr %arrayinit.element260, i32 1
+  store i8 65, ptr %arrayinit.element261, align 1
+  %arrayinit.element262 = getelementptr inbounds i8, ptr %arrayinit.element261, i32 1
+  store i8 0, ptr %arrayinit.element262, align 1
+  %arrayinit.element263 = getelementptr inbounds i8, ptr %arrayinit.element262, i32 1
+  store i8 15, ptr %arrayinit.element263, align 1
+  %arrayinit.element264 = getelementptr inbounds i8, ptr %arrayinit.element263, i32 1
+  store i8 11, ptr %arrayinit.element264, align 1
+  %arrayinit.element265 = getelementptr inbounds i8, ptr %arrayinit.element264, i32 1
+  store i8 65, ptr %arrayinit.element265, align 1
+  %arrayinit.element266 = getelementptr inbounds i8, ptr %arrayinit.element265, i32 1
+  store i8 1, ptr %arrayinit.element266, align 1
+  %arrayinit.element267 = getelementptr inbounds i8, ptr %arrayinit.element266, i32 1
+  store i8 15, ptr %arrayinit.element267, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %code, i32 269
+  call void @g(ptr %code, ptr %add.ptr)
   ret void
 }
 
-declare void @g(i8*, i8*)
+declare void @g(ptr, ptr)
 
 attributes #1 = { noinline nounwind optnone ssp uwtable }

diff --git a/llvm/test/CodeGen/Mips/delay-slot-fill-forward.ll b/llvm/test/CodeGen/Mips/delay-slot-fill-forward.ll
index 6b9e9244ca7d5..7c41641560eb3 100644
--- a/llvm/test/CodeGen/Mips/delay-slot-fill-forward.ll
+++ b/llvm/test/CodeGen/Mips/delay-slot-fill-forward.ll
@@ -6,80 +6,80 @@
 ; This test was generated with bugpoint from
 ; MultiSource/Applications/JM/lencod/me_fullsearch.c
 
-%struct.SubImageContainer = type { i16****, [2 x i16****] }
+%struct.SubImageContainer = type { ptr, [2 x ptr] }
 %struct.storable_picture = type { i32, i32, i32, i32, i32, i32,
   [6 x [33 x i64]], [6 x [33 x i64]], [6 x [33 x i64]], [6 x [33 x i64]],
   i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
-  i32, i32, i32, i32, i32, i16**, i16****, i16****, i16*****, i16***,
-  i8*, i8***, i64***, i64***, i16****, i8**, i8**, %struct.storable_picture*,
-  %struct.storable_picture*, %struct.storable_picture*,
+  i32, i32, i32, i32, i32, ptr, ptr, ptr, ptr, ptr,
+  ptr, ptr, ptr, ptr, ptr, ptr, ptr, ptr,
+  ptr, ptr,
   i32, i32, i32, i32, i32, i32, i32 }
 
 @img_height = external global i16, align 2
 @width_pad = external global i32, align 4
 @height_pad = external global i32, align 4
-@mvbits = external global i32*, align 4
+@mvbits = external global ptr, align 4
 @ref_pic1_sub = external global %struct.SubImageContainer, align 4
 @ref_pic2_sub = external global %struct.SubImageContainer, align 4
-@wbp_weight = external global i32****, align 4
+@wbp_weight = external global ptr, align 4
 @weight1 = external global i16, align 2
 @weight2 = external global i16, align 2
 @offsetBi = external global i16, align 2
-@computeBiPred2 = external global [3 x i32 (i16*, i32, i32, i32, i32, i32, i32, i32)*], align 4
-@computeBiPred = external global i32 (i16*, i32, i32, i32, i32, i32, i32, i32)*, align 4
+@computeBiPred2 = external global [3 x ptr], align 4
+@computeBiPred = external global ptr, align 4
 @bipred2_access_method = external global i32, align 4
 @start_me_refinement_hp = external global i32, align 4
 
-declare i32 @foobar(i16*, i32 signext , i32 signext , i32 signext ,
+declare i32 @foobar(ptr, i32 signext , i32 signext , i32 signext ,
                     i32 signext , i32 signext , i32 signext , i32 signext ) #1
 
-define void @SubPelBlockSearchBiPred(i16* %orig_pic, i16 signext %ref,
+define void @SubPelBlockSearchBiPred(ptr %orig_pic, i16 signext %ref,
     i32 signext %pic_pix_x, i32 signext %pic_pix_y, i16 signext %pred_mv_y,
-    i16* nocapture %mv_x, i16* nocapture %mv_y, i16* nocapture readonly %s_mv_x,
+    ptr nocapture %mv_x, ptr nocapture %mv_y, ptr nocapture readonly %s_mv_x,
     i32 signext %search_pos2, i32 signext %min_mcost) #0 {
 ; CHECK-LABEL: SubPelBlockSearchBiPred:
 entry:
   %add40 = shl i32 %pic_pix_x, 2
   %shl = add i32 %add40, 80
   %add41 = shl i32 %pic_pix_y, 2
-  %0 = load i32, i32* @start_me_refinement_hp, align 4, !tbaa !1
+  %0 = load i32, ptr @start_me_refinement_hp, align 4, !tbaa !1
   %cond47 = select i1 undef, i32 1, i32 %search_pos2
-  %1 = load i16, i16* %s_mv_x, align 2, !tbaa !5
+  %1 = load i16, ptr %s_mv_x, align 2, !tbaa !5
   %conv48 = sext i16 %1 to i32
   %add49 = add nsw i32 %conv48, %shl
   %idxprom52 = sext i16 %ref to i32
-  %2 = load i32, i32* null, align 4, !tbaa !1
-  store i32 undef, i32* bitcast (%struct.SubImageContainer* @ref_pic1_sub to i32*), align 4, !tbaa !7
-  %3 = load i32, i32* undef, align 4, !tbaa !10
-  store i32 %3, i32* bitcast (%struct.SubImageContainer* @ref_pic2_sub to i32*), align 4, !tbaa !7
-  store i16 0, i16* @img_height, align 2, !tbaa !5
-  %size_x_pad = getelementptr inbounds %struct.storable_picture, %struct.storable_picture* null, i32 0, i32 22
-  %4 = load i32, i32* %size_x_pad, align 4, !tbaa !12
-  store i32 %4, i32* @width_pad, align 4, !tbaa !1
-  %5 = load i32, i32* undef, align 4, !tbaa !13
-  store i32 %5, i32* @height_pad, align 4, !tbaa !1
-  %6 = load i32****, i32***** @wbp_weight, align 4, !tbaa !14
-  %arrayidx75 = getelementptr inbounds i32***, i32**** %6, i32 undef
-  %7 = load i32***, i32**** %arrayidx75, align 4, !tbaa !14
-  %arrayidx76 = getelementptr inbounds i32**, i32*** %7, i32 %idxprom52
-  %8 = load i32**, i32*** %arrayidx76, align 4, !tbaa !14
-  %cond87.in671 = load i32*, i32** %8, align 4
-  %cond87672 = load i32, i32* %cond87.in671, align 4
+  %2 = load i32, ptr null, align 4, !tbaa !1
+  store i32 undef, ptr @ref_pic1_sub, align 4, !tbaa !7
+  %3 = load i32, ptr undef, align 4, !tbaa !10
+  store i32 %3, ptr @ref_pic2_sub, align 4, !tbaa !7
+  store i16 0, ptr @img_height, align 2, !tbaa !5
+  %size_x_pad = getelementptr inbounds %struct.storable_picture, ptr null, i32 0, i32 22
+  %4 = load i32, ptr %size_x_pad, align 4, !tbaa !12
+  store i32 %4, ptr @width_pad, align 4, !tbaa !1
+  %5 = load i32, ptr undef, align 4, !tbaa !13
+  store i32 %5, ptr @height_pad, align 4, !tbaa !1
+  %6 = load ptr, ptr @wbp_weight, align 4, !tbaa !14
+  %arrayidx75 = getelementptr inbounds ptr, ptr %6, i32 undef
+  %7 = load ptr, ptr %arrayidx75, align 4, !tbaa !14
+  %arrayidx76 = getelementptr inbounds ptr, ptr %7, i32 %idxprom52
+  %8 = load ptr, ptr %arrayidx76, align 4, !tbaa !14
+  %cond87.in671 = load ptr, ptr %8, align 4
+  %cond87672 = load i32, ptr %cond87.in671, align 4
   %conv88673 = trunc i32 %cond87672 to i16
-  store i16 %conv88673, i16* @weight1, align 2, !tbaa !5
-  %cond105 = load i32, i32* undef, align 4
+  store i16 %conv88673, ptr @weight1, align 2, !tbaa !5
+  %cond105 = load i32, ptr undef, align 4
   %conv106 = trunc i32 %cond105 to i16
-  store i16 %conv106, i16* @weight2, align 2, !tbaa !5
-  store i16 0, i16* @offsetBi, align 2, !tbaa !5
-  %storemerge655 = load i32, i32* bitcast (i32 (i16*, i32, i32, i32, i32, i32, i32, i32)** getelementptr inbounds ([3 x i32 (i16*, i32, i32, i32, i32, i32, i32, i32)*], [3 x i32 (i16*, i32, i32, i32, i32, i32, i32, i32)*]* @computeBiPred2, i32 0, i32 1) to i32*), align 4
-  store i32 %storemerge655, i32* bitcast (i32 (i16*, i32, i32, i32, i32, i32, i32, i32)** @computeBiPred to i32*), align 4, !tbaa !14
-  %9 = load i16, i16* %mv_x, align 2, !tbaa !5
+  store i16 %conv106, ptr @weight2, align 2, !tbaa !5
+  store i16 0, ptr @offsetBi, align 2, !tbaa !5
+  %storemerge655 = load i32, ptr getelementptr inbounds ([3 x ptr], ptr @computeBiPred2, i32 0, i32 1), align 4
+  store i32 %storemerge655, ptr @computeBiPred, align 4, !tbaa !14
+  %9 = load i16, ptr %mv_x, align 2, !tbaa !5
   %cmp270 = icmp sgt i32 undef, 1
   %or.cond = and i1 %cmp270, false
   br i1 %or.cond, label %land.lhs.true277, label %if.else289
 
 land.lhs.true277:                                 ; preds = %entry
-  %10 = load i16, i16* %mv_y, align 2, !tbaa !5
+  %10 = load i16, ptr %mv_y, align 2, !tbaa !5
   %conv278 = sext i16 %10 to i32
   %add279 = add nsw i32 %conv278, 0
   %cmp280 = icmp sgt i32 %add279, 1
@@ -91,7 +91,7 @@ if.else289:                                       ; preds = %land.lhs.true277, %
 
 if.end290:                                        ; preds = %if.else289, %land.lhs.true277
   %storemerge = phi i32 [ 1, %if.else289 ], [ 0, %land.lhs.true277 ]
-  store i32 %storemerge, i32* @bipred2_access_method, align 4, !tbaa !1
+  store i32 %storemerge, ptr @bipred2_access_method, align 4, !tbaa !1
   %cmp315698 = icmp slt i32 %0, %cond47
   br i1 %cmp315698, label %for.body.lr.ph, label %if.end358
 
@@ -106,14 +106,14 @@ for.body:                                         ; preds = %for.inc, %for.body.
   %best_pos.0699 = phi i32 [ 0, %for.body.lr.ph ], [ %best_pos.1, %for.inc ]
   %conv317 = sext i16 %11 to i32
   %add320 = add nsw i32 0, %conv317
-  %12 = load i16, i16* %mv_y, align 2, !tbaa !5
+  %12 = load i16, ptr %mv_y, align 2, !tbaa !5
   %conv321 = sext i16 %12 to i32
   %add324 = add nsw i32 0, %conv321
-  %13 = load i32*, i32** @mvbits, align 4, !tbaa !14
-  %14 = load i32, i32* undef, align 4, !tbaa !1
+  %13 = load ptr, ptr @mvbits, align 4, !tbaa !14
+  %14 = load i32, ptr undef, align 4, !tbaa !1
   %sub329 = sub nsw i32 %add324, %conv328
-  %arrayidx330 = getelementptr inbounds i32, i32* %13, i32 %sub329
-  %15 = load i32, i32* %arrayidx330, align 4, !tbaa !1
+  %arrayidx330 = getelementptr inbounds i32, ptr %13, i32 %sub329
+  %15 = load i32, ptr %arrayidx330, align 4, !tbaa !1
   %add331 = add nsw i32 %15, %14
   %mul = mul nsw i32 %add331, %2
   %shr332 = ashr i32 %mul, 16
@@ -126,7 +126,7 @@ if.end336:                                        ; preds = %for.body
   ; CHECK:      j     $BB{{.*}}
   %add337 = add nsw i32 %add320, %shl
   %add338 = add nsw i32 %add324, 0
-  %call340 = tail call i32 undef(i16* %orig_pic, i32 signext undef, i32 signext
+  %call340 = tail call i32 undef(ptr %orig_pic, i32 signext undef, i32 signext
                                  undef, i32 signext 0, i32 signext %add49,
                                  i32 signext undef, i32 signext %add337,
                                  i32 signext %add338) #1
@@ -136,7 +136,7 @@ if.end336:                                        ; preds = %for.body
 
 for.inc:                                          ; preds = %if.end336, %for.body
   %best_pos.1 = phi i32 [ %best_pos.0699, %for.body ], [ %pos.0.best_pos.0, %if.end336 ]
-  %.pre = load i16, i16* %mv_x, align 2, !tbaa !5
+  %.pre = load i16, ptr %mv_x, align 2, !tbaa !5
   br label %for.body
 
 if.end358:                                        ; preds = %if.end290
@@ -144,7 +144,7 @@ if.end358:                                        ; preds = %if.end290
   br i1 undef, label %for.body415.lr.ph, label %if.end461
 
 for.body415.lr.ph:                                ; preds = %if.end358
-  %16 = load i16, i16* %mv_y, align 2, !tbaa !5
+  %16 = load i16, ptr %mv_y, align 2, !tbaa !5
   %conv420 = sext i16 %16 to i32
   %add423 = add nsw i32 0, %conv420
   %cmp433 = icmp sgt i32 %.min_mcost.addr.0, 0
@@ -152,7 +152,7 @@ for.body415.lr.ph:                                ; preds = %if.end358
 
 if.end436:                                        ; preds = %for.body415.lr.ph
   %add438 = add nsw i32 %add423, 0
-  %call440 = tail call i32 @foobar(i16* %orig_pic, i32 signext undef, i32 signext undef,
+  %call440 = tail call i32 @foobar(ptr %orig_pic, i32 signext undef, i32 signext undef,
                                  i32 signext 0, i32 signext %add49, i32 signext undef,
                                  i32 signext undef, i32 signext %add438) #1
   br label %if.end461

diff --git a/llvm/test/CodeGen/Mips/dins.ll b/llvm/test/CodeGen/Mips/dins.ll
index 79364ca675b0c..02a8efd5c0382 100644
--- a/llvm/test/CodeGen/Mips/dins.ll
+++ b/llvm/test/CodeGen/Mips/dins.ll
@@ -34,28 +34,28 @@ define i64 @f123(i64 inreg %bufptr.coerce0, i64 inreg %bufptr.coerce1) local_unn
 entry:
   %bufptr.sroa.0 = alloca i64, align 8
   %bufptr.sroa.4 = alloca i64, align 8
-  store i64 %bufptr.coerce0, i64* %bufptr.sroa.0, align 8
-  store i64 %bufptr.coerce1, i64* %bufptr.sroa.4, align 8
-  %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load = load volatile i64, i64* %bufptr.sroa.0, align 8
+  store i64 %bufptr.coerce0, ptr %bufptr.sroa.0, align 8
+  store i64 %bufptr.coerce1, ptr %bufptr.sroa.4, align 8
+  %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load = load volatile i64, ptr %bufptr.sroa.0, align 8
   %bf.clear = and i64 %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load, 134217727
   %bf.set = or i64 %bf.clear, 16508780544
-  store volatile i64 %bf.set, i64* %bufptr.sroa.0, align 8
-  %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load2 = load volatile i64, i64* %bufptr.sroa.4, align 8
+  store volatile i64 %bf.set, ptr %bufptr.sroa.0, align 8
+  %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load2 = load volatile i64, ptr %bufptr.sroa.4, align 8
   %bf.clear3 = and i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load2, -16911433729
   %bf.set4 = or i64 %bf.clear3, 1073741824
-  store volatile i64 %bf.set4, i64* %bufptr.sroa.4, align 8
-  %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load6 = load volatile i64, i64* %bufptr.sroa.4, align 8
+  store volatile i64 %bf.set4, ptr %bufptr.sroa.4, align 8
+  %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load6 = load volatile i64, ptr %bufptr.sroa.4, align 8
   %bf.clear7 = and i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load6, 1125899906842623
   %bf.set8 = or i64 %bf.clear7, 5629499534213120
-  store volatile i64 %bf.set8, i64* %bufptr.sroa.4, align 8
-  %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load11 = load volatile i64, i64* %bufptr.sroa.4, align 8
+  store volatile i64 %bf.set8, ptr %bufptr.sroa.4, align 8
+  %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load11 = load volatile i64, ptr %bufptr.sroa.4, align 8
   %bf.lshr = lshr i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load11, 50
-  %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load13 = load volatile i64, i64* %bufptr.sroa.4, align 8
+  %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load13 = load volatile i64, ptr %bufptr.sroa.4, align 8
   %bf.shl = shl nuw nsw i64 %bf.lshr, 34
   %bf.clear14 = and i64 %bufptr.sroa.4.0.bufptr.sroa.4.0.bufptr.sroa.4.8.bf.load13, -1125882726973441
   %bf.set15 = or i64 %bf.clear14, %bf.shl
-  store volatile i64 %bf.set15, i64* %bufptr.sroa.4, align 8
-  %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load17 = load volatile i64, i64* %bufptr.sroa.0, align 8
+  store volatile i64 %bf.set15, ptr %bufptr.sroa.4, align 8
+  %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load17 = load volatile i64, ptr %bufptr.sroa.0, align 8
   %bf.lshr18 = lshr i64 %bufptr.sroa.0.0.bufptr.sroa.0.0.bufptr.sroa.0.0.bf.load17, 27
   ret i64 %bf.lshr18
 }
@@ -84,11 +84,11 @@ entry:
 define i32 @foo(i32 signext %x) {
 entry:
   %x.addr = alloca i32, align 4
-  store volatile i32 %x, i32* %x.addr, align 4
-  %x.addr.0.x.addr.0. = load volatile i32, i32* %x.addr, align 4
+  store volatile i32 %x, ptr %x.addr, align 4
+  %x.addr.0.x.addr.0. = load volatile i32, ptr %x.addr, align 4
   %and = and i32 %x.addr.0.x.addr.0., -4
   %or = or i32 %and, 8
-  store volatile i32 %or, i32* %x.addr, align 4
+  store volatile i32 %or, ptr %x.addr, align 4
   ret i32 %and
 }
 

diff --git a/llvm/test/CodeGen/Mips/disable-tail-merge.ll b/llvm/test/CodeGen/Mips/disable-tail-merge.ll
index 9396db7be7f66..188b83b68f5d3 100644
--- a/llvm/test/CodeGen/Mips/disable-tail-merge.ll
+++ b/llvm/test/CodeGen/Mips/disable-tail-merge.ll
@@ -9,25 +9,25 @@
 define i32 @test1(i32 %a) {
 entry:
   %tobool = icmp eq i32 %a, 0
-  %0 = load i32, i32* @g0, align 4
+  %0 = load i32, ptr @g0, align 4
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:
   %add = add nsw i32 %0, 1
-  store i32 %add, i32* @g0, align 4
-  %1 = load i32, i32* @g1, align 4
+  store i32 %add, ptr @g0, align 4
+  %1 = load i32, ptr @g1, align 4
   %add1 = add nsw i32 %1, 23
   br label %if.end
 
 if.else:
   %add2 = add nsw i32 %0, 11
-  store i32 %add2, i32* @g0, align 4
-  %2 = load i32, i32* @g1, align 4
+  store i32 %add2, ptr @g0, align 4
+  %2 = load i32, ptr @g1, align 4
   %add3 = add nsw i32 %2, 23
   br label %if.end
 
 if.end:
   %storemerge = phi i32 [ %add3, %if.else ], [ %add1, %if.then ]
-  store i32 %storemerge, i32* @g1, align 4
+  store i32 %storemerge, ptr @g1, align 4
   ret i32 %storemerge
 }

diff --git a/llvm/test/CodeGen/Mips/div.ll b/llvm/test/CodeGen/Mips/div.ll
index 92258bce02d2f..839b2dee6d09e 100644
--- a/llvm/test/CodeGen/Mips/div.ll
+++ b/llvm/test/CodeGen/Mips/div.ll
@@ -6,12 +6,12 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @iiii, align 4
-  %1 = load i32, i32* @jjjj, align 4
+  %0 = load i32, ptr @iiii, align 4
+  %1 = load i32, ptr @jjjj, align 4
   %div = sdiv i32 %0, %1
 ; 16:	div	$zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mflo	${{[0-9]+}}
-  store i32 %div, i32* @kkkk, align 4
+  store i32 %div, ptr @kkkk, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/div_rem.ll b/llvm/test/CodeGen/Mips/div_rem.ll
index be1e001a24c33..c8e22f262fb28 100644
--- a/llvm/test/CodeGen/Mips/div_rem.ll
+++ b/llvm/test/CodeGen/Mips/div_rem.ll
@@ -7,15 +7,15 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @iiii, align 4
-  %1 = load i32, i32* @jjjj, align 4
+  %0 = load i32, ptr @iiii, align 4
+  %1 = load i32, ptr @jjjj, align 4
   %div = sdiv i32 %0, %1
-  store i32 %div, i32* @kkkk, align 4
+  store i32 %div, ptr @kkkk, align 4
   %rem = srem i32 %0, %1
 ; 16:	div	$zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mflo	${{[0-9]+}}
 ; 16: 	mfhi	${{[0-9]+}}
-  store i32 %rem, i32* @llll, align 4
+  store i32 %rem, ptr @llll, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/divrem.ll b/llvm/test/CodeGen/Mips/divrem.ll
index 35bc96a0d03d6..b0dd49a062217 100644
--- a/llvm/test/CodeGen/Mips/divrem.ll
+++ b/llvm/test/CodeGen/Mips/divrem.ll
@@ -134,7 +134,7 @@ entry:
   ret i32 %rem
 }
 
-define i32 @sdivrem1(i32 signext %a0, i32 signext %a1, i32* nocapture %r) nounwind {
+define i32 @sdivrem1(i32 signext %a0, i32 signext %a1, ptr nocapture %r) nounwind {
 entry:
 ; ALL-LABEL: sdivrem1:
 
@@ -170,12 +170,12 @@ entry:
 ; ALL: .end sdivrem1
 
   %rem = srem i32 %a0, %a1
-  store i32 %rem, i32* %r, align 4
+  store i32 %rem, ptr %r, align 4
   %div = sdiv i32 %a0, %a1
   ret i32 %div
 }
 
-define i32 @udivrem1(i32 signext %a0, i32 signext %a1, i32* nocapture %r) nounwind {
+define i32 @udivrem1(i32 signext %a0, i32 signext %a1, ptr nocapture %r) nounwind {
 entry:
 ; ALL-LABEL: udivrem1:
 
@@ -212,7 +212,7 @@ entry:
 ; ALL: .end udivrem1
 
   %rem = urem i32 %a0, %a1
-  store i32 %rem, i32* %r, align 4
+  store i32 %rem, ptr %r, align 4
   %div = udiv i32 %a0, %a1
   ret i32 %div
 }
@@ -220,8 +220,8 @@ entry:
 ; FIXME: It's not clear what this is supposed to test.
 define i32 @killFlags() {
 entry:
-  %0 = load i32, i32* @g0, align 4
-  %1 = load i32, i32* @g1, align 4
+  %0 = load i32, ptr @g0, align 4
+  %1 = load i32, ptr @g1, align 4
   %div = sdiv i32 %0, %1
   ret i32 %div
 }
@@ -317,7 +317,7 @@ entry:
   ret i64 %rem
 }
 
-define i64 @sdivrem2(i64 %a0, i64 %a1, i64* nocapture %r) nounwind {
+define i64 @sdivrem2(i64 %a0, i64 %a1, ptr nocapture %r) nounwind {
 entry:
 ; ALL-LABEL: sdivrem2:
 
@@ -347,12 +347,12 @@ entry:
 ; ALL: .end sdivrem2
 
   %rem = srem i64 %a0, %a1
-  store i64 %rem, i64* %r, align 8
+  store i64 %rem, ptr %r, align 8
   %div = sdiv i64 %a0, %a1
   ret i64 %div
 }
 
-define i64 @udivrem2(i64 %a0, i64 %a1, i64* nocapture %r) nounwind {
+define i64 @udivrem2(i64 %a0, i64 %a1, ptr nocapture %r) nounwind {
 entry:
 ; ALL-LABEL: udivrem2:
 
@@ -382,7 +382,7 @@ entry:
 ; ALL: .end udivrem2
 
   %rem = urem i64 %a0, %a1
-  store i64 %rem, i64* %r, align 8
+  store i64 %rem, ptr %r, align 8
   %div = udiv i64 %a0, %a1
   ret i64 %div
 }

diff --git a/llvm/test/CodeGen/Mips/divu.ll b/llvm/test/CodeGen/Mips/divu.ll
index ce1b70cacf6f0..caa49fd472b37 100644
--- a/llvm/test/CodeGen/Mips/divu.ll
+++ b/llvm/test/CodeGen/Mips/divu.ll
@@ -6,12 +6,12 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @iiii, align 4
-  %1 = load i32, i32* @jjjj, align 4
+  %0 = load i32, ptr @iiii, align 4
+  %1 = load i32, ptr @jjjj, align 4
   %div = udiv i32 %0, %1
 ; 16:	divu	$zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mflo	${{[0-9]+}}
-  store i32 %div, i32* @kkkk, align 4
+  store i32 %div, ptr @kkkk, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/divu_remu.ll b/llvm/test/CodeGen/Mips/divu_remu.ll
index 0e094cbe48ae3..820633be119be 100644
--- a/llvm/test/CodeGen/Mips/divu_remu.ll
+++ b/llvm/test/CodeGen/Mips/divu_remu.ll
@@ -8,15 +8,15 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @iiii, align 4
-  %1 = load i32, i32* @jjjj, align 4
+  %0 = load i32, ptr @iiii, align 4
+  %1 = load i32, ptr @jjjj, align 4
   %div = udiv i32 %0, %1
-  store i32 %div, i32* @kkkk, align 4
+  store i32 %div, ptr @kkkk, align 4
   %rem = urem i32 %0, %1
 ; 16:	divu	$zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mflo	${{[0-9]+}}
 ; 16: 	mfhi	${{[0-9]+}}
-  store i32 %rem, i32* @llll, align 4
+  store i32 %rem, ptr @llll, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/dsp-patterns.ll b/llvm/test/CodeGen/Mips/dsp-patterns.ll
index 250d3eff37dc5..ba6c4d972c56d 100644
--- a/llvm/test/CodeGen/Mips/dsp-patterns.ll
+++ b/llvm/test/CodeGen/Mips/dsp-patterns.ll
@@ -4,30 +4,30 @@
 ; R1-LABEL: test_lbux:
 ; R1: lbux ${{[0-9]+}}
 
-define zeroext i8 @test_lbux(i8* nocapture %b, i32 %i) {
+define zeroext i8 @test_lbux(ptr nocapture %b, i32 %i) {
 entry:
-  %add.ptr = getelementptr inbounds i8, i8* %b, i32 %i
-  %0 = load i8, i8* %add.ptr, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %b, i32 %i
+  %0 = load i8, ptr %add.ptr, align 1
   ret i8 %0
 }
 
 ; R1-LABEL: test_lhx:
 ; R1: lhx ${{[0-9]+}}
 
-define signext i16 @test_lhx(i16* nocapture %b, i32 %i) {
+define signext i16 @test_lhx(ptr nocapture %b, i32 %i) {
 entry:
-  %add.ptr = getelementptr inbounds i16, i16* %b, i32 %i
-  %0 = load i16, i16* %add.ptr, align 2
+  %add.ptr = getelementptr inbounds i16, ptr %b, i32 %i
+  %0 = load i16, ptr %add.ptr, align 2
   ret i16 %0
 }
 
 ; R1-LABEL: test_lwx:
 ; R1: lwx ${{[0-9]+}}
 
-define i32 @test_lwx(i32* nocapture %b, i32 %i) {
+define i32 @test_lwx(ptr nocapture %b, i32 %i) {
 entry:
-  %add.ptr = getelementptr inbounds i32, i32* %b, i32 %i
-  %0 = load i32, i32* %add.ptr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %b, i32 %i
+  %0 = load i32, ptr %add.ptr, align 4
   ret i32 %0
 }
 

diff --git a/llvm/test/CodeGen/Mips/dsp-r1.ll b/llvm/test/CodeGen/Mips/dsp-r1.ll
index 90eb14a75b424..0ec23b9d7fd77 100644
--- a/llvm/test/CodeGen/Mips/dsp-r1.ll
+++ b/llvm/test/CodeGen/Mips/dsp-r1.ll
@@ -1204,35 +1204,35 @@ entry:
 
 declare i32 @llvm.mips.bitrev(i32) nounwind readnone
 
-define i32 @test__builtin_mips_lbux1(i32 %i0, i8* %a0, i32 %a1) nounwind readonly {
+define i32 @test__builtin_mips_lbux1(i32 %i0, ptr %a0, i32 %a1) nounwind readonly {
 entry:
 ; CHECK: lbux ${{[0-9]+}}
 
-  %0 = tail call i32 @llvm.mips.lbux(i8* %a0, i32 %a1)
+  %0 = tail call i32 @llvm.mips.lbux(ptr %a0, i32 %a1)
   ret i32 %0
 }
 
-declare i32 @llvm.mips.lbux(i8*, i32) nounwind readonly
+declare i32 @llvm.mips.lbux(ptr, i32) nounwind readonly
 
-define i32 @test__builtin_mips_lhx1(i32 %i0, i8* %a0, i32 %a1) nounwind readonly {
+define i32 @test__builtin_mips_lhx1(i32 %i0, ptr %a0, i32 %a1) nounwind readonly {
 entry:
 ; CHECK: lhx ${{[0-9]+}}
 
-  %0 = tail call i32 @llvm.mips.lhx(i8* %a0, i32 %a1)
+  %0 = tail call i32 @llvm.mips.lhx(ptr %a0, i32 %a1)
   ret i32 %0
 }
 
-declare i32 @llvm.mips.lhx(i8*, i32) nounwind readonly
+declare i32 @llvm.mips.lhx(ptr, i32) nounwind readonly
 
-define i32 @test__builtin_mips_lwx1(i32 %i0, i8* %a0, i32 %a1) nounwind readonly {
+define i32 @test__builtin_mips_lwx1(i32 %i0, ptr %a0, i32 %a1) nounwind readonly {
 entry:
 ; CHECK: lwx ${{[0-9]+}}
 
-  %0 = tail call i32 @llvm.mips.lwx(i8* %a0, i32 %a1)
+  %0 = tail call i32 @llvm.mips.lwx(ptr %a0, i32 %a1)
   ret i32 %0
 }
 
-declare i32 @llvm.mips.lwx(i8*, i32) nounwind readonly
+declare i32 @llvm.mips.lwx(ptr, i32) nounwind readonly
 
 define i32 @test__builtin_mips_wrdsp1(i32 %i0, i32 %a0) nounwind {
 entry:

diff --git a/llvm/test/CodeGen/Mips/dsp-vec-load-store.ll b/llvm/test/CodeGen/Mips/dsp-vec-load-store.ll
index f9251807d0000..2feefcc09879a 100644
--- a/llvm/test/CodeGen/Mips/dsp-vec-load-store.ll
+++ b/llvm/test/CodeGen/Mips/dsp-vec-load-store.ll
@@ -5,7 +5,7 @@
 
 define void @extend_load_trunc_store_v2i8() {
 entry:
-  %0 = load <2 x i8>, <2 x i8>* @g1, align 2
-  store <2 x i8> %0, <2 x i8>* @g0, align 2
+  %0 = load <2 x i8>, ptr @g1, align 2
+  store <2 x i8> %0, ptr @g0, align 2
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/dynamic-stack-realignment.ll b/llvm/test/CodeGen/Mips/dynamic-stack-realignment.ll
index 50acd8ffb1eb1..c201c34f96861 100644
--- a/llvm/test/CodeGen/Mips/dynamic-stack-realignment.ll
+++ b/llvm/test/CodeGen/Mips/dynamic-stack-realignment.ll
@@ -23,7 +23,7 @@
 
 ; Check dynamic stack realignment in functions without variable-sized objects.
 
-declare void @helper_01(i32, i32, i32, i32, i32*)
+declare void @helper_01(i32, i32, i32, i32, ptr)
 
 ; O32 ABI
 define void @func_01() {
@@ -58,12 +58,12 @@ entry:
   ; GP32-MMR6:  addiu   $sp, $sp, 1024
 
   %a = alloca i32, align 512
-  call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+  call void @helper_01(i32 0, i32 0, i32 0, i32 0, ptr %a)
   ret void
 }
 
 declare void @helper_02(i32, i32, i32, i32,
-                        i32, i32, i32, i32, i32*)
+                        i32, i32, i32, i32, ptr)
 
 ; N32/N64 ABIs
 define void @func_02() {
@@ -100,16 +100,16 @@ entry:
 
   %a = alloca i32, align 512
   call void @helper_02(i32 0, i32 0, i32 0, i32 0,
-                       i32 0, i32 0, i32 0, i32 0, i32* %a)
+                       i32 0, i32 0, i32 0, i32 0, ptr %a)
   ret void
 }
 
 ; Verify that we use $fp for referencing incoming arguments.
 
-declare void @helper_03(i32, i32, i32, i32, i32*, i32*)
+declare void @helper_03(i32, i32, i32, i32, ptr, ptr)
 
 ; O32 ABI
-define void @func_03(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32* %b) {
+define void @func_03(i32 %p0, i32 %p1, i32 %p2, i32 %p3, ptr %b) {
 entry:
 ; GP32-LABEL: func_03:
 
@@ -123,17 +123,17 @@ entry:
   ; GP32-MM-DAG:  sw16    $[[T1]], 20(${{[0-9]+}})
 
   %a = alloca i32, align 512
-  call void @helper_03(i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+  call void @helper_03(i32 0, i32 0, i32 0, i32 0, ptr %a, ptr %b)
   ret void
 }
 
 declare void @helper_04(i32, i32, i32, i32,
-                        i32, i32, i32, i32, i32*, i32*)
+                        i32, i32, i32, i32, ptr, ptr)
 
 ; N32/N64 ABIs
 define void @func_04(i32 %p0, i32 %p1, i32 %p2, i32 %p3,
                      i32 %p4, i32 %p5, i32 %p6, i32 %p7,
-                     i32* %b) {
+                     ptr %b) {
 entry:
 ; GP64-LABEL: func_04:
 
@@ -147,7 +147,7 @@ entry:
 
   %a = alloca i32, align 512
   call void @helper_04(i32 0, i32 0, i32 0, i32 0,
-                       i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+                       i32 0, i32 0, i32 0, i32 0, ptr %a, ptr %b)
   ret void
 }
 
@@ -188,8 +188,8 @@ entry:
   %a0 = alloca i32, i32 %sz, align 512
   %a1 = alloca i32, align 4
 
-  store volatile i32 111, i32* %a0, align 512
-  store volatile i32 222, i32* %a1, align 4
+  store volatile i32 111, ptr %a0, align 512
+  store volatile i32 222, ptr %a1, align 4
 
   ret void
 }
@@ -227,8 +227,8 @@ entry:
   %a0 = alloca i32, i32 %sz, align 512
   %a1 = alloca i32, align 4
 
-  store volatile i32 111, i32* %a0, align 512
-  store volatile i32 222, i32* %a1, align 4
+  store volatile i32 111, ptr %a0, align 512
+  store volatile i32 222, ptr %a1, align 4
 
   ret void
 }
@@ -254,10 +254,10 @@ entry:
   %a0 = alloca i32, i32 %sz, align 512
   %a1 = alloca i32, align 4
 
-  store volatile i32 111, i32* %a0, align 512
-  store volatile i32 222, i32* %a1, align 4
+  store volatile i32 111, ptr %a0, align 512
+  store volatile i32 222, ptr %a1, align 4
 
-  call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a1)
+  call void @helper_01(i32 0, i32 0, i32 0, i32 0, ptr %a1)
 
   ret void
 }
@@ -282,11 +282,11 @@ entry:
   %a0 = alloca i32, i32 %sz, align 512
   %a1 = alloca i32, align 4
 
-  store volatile i32 111, i32* %a0, align 512
-  store volatile i32 222, i32* %a1, align 4
+  store volatile i32 111, ptr %a0, align 512
+  store volatile i32 222, ptr %a1, align 4
 
   call void @helper_02(i32 0, i32 0, i32 0, i32 0,
-                       i32 0, i32 0, i32 0, i32 0, i32* %a1)
+                       i32 0, i32 0, i32 0, i32 0, ptr %a1)
   ret void
 }
 
@@ -299,7 +299,7 @@ entry:
   ; ALL-NOT:  and     $sp, $sp, $[[T0:[0-9]+|ra|gp]]
 
   %a = alloca i32, align 512
-  call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+  call void @helper_01(i32 0, i32 0, i32 0, i32 0, ptr %a)
   ret void
 }
 
@@ -312,8 +312,8 @@ entry:
   %a0 = alloca i32, i32 %sz, align 512
   %a1 = alloca i32, align 4
 
-  store volatile i32 111, i32* %a0, align 512
-  store volatile i32 222, i32* %a1, align 4
+  store volatile i32 111, ptr %a0, align 512
+  store volatile i32 222, ptr %a1, align 4
 
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/eh-dwarf-cfa.ll b/llvm/test/CodeGen/Mips/eh-dwarf-cfa.ll
index c4019c78d69ef..2377707b8f728 100644
--- a/llvm/test/CodeGen/Mips/eh-dwarf-cfa.ll
+++ b/llvm/test/CodeGen/Mips/eh-dwarf-cfa.ll
@@ -4,14 +4,14 @@
 ; RUN: llc -march=mips64el -mcpu=mips64 < %s | \
 ; RUN:      FileCheck %s -check-prefix=CHECK-MIPS64
 
-declare i8* @llvm.eh.dwarf.cfa(i32) nounwind
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.eh.dwarf.cfa(i32) nounwind
+declare ptr @llvm.frameaddress(i32) nounwind readnone
 
-define i8* @f1() nounwind {
+define ptr @f1() nounwind {
 entry:
   %x = alloca [32 x i8], align 1
-  %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
-  ret i8* %0
+  %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
+  ret ptr %0
 
 ; CHECK-LABEL: f1:
 
@@ -20,11 +20,11 @@ entry:
 }
 
 
-define i8* @f2() nounwind {
+define ptr @f2() nounwind {
 entry:
   %x = alloca [65536 x i8], align 1
-  %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
-  ret i8* %0
+  %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
+  ret ptr %0
 
 ; CHECK-LABEL: f2:
 
@@ -43,10 +43,10 @@ entry:
 define i32 @f3() nounwind {
 entry:
   %x = alloca [32 x i8], align 1
-  %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
-  %1 = ptrtoint i8* %0 to i32
-  %2 = call i8* @llvm.frameaddress(i32 0)
-  %3 = ptrtoint i8* %2 to i32
+  %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
+  %1 = ptrtoint ptr %0 to i32
+  %2 = call ptr @llvm.frameaddress(i32 0)
+  %3 = ptrtoint ptr %2 to i32
   %add = add i32 %1, %3
   ret i32 %add
 
@@ -60,11 +60,11 @@ entry:
 }
 
 
-define i8* @f4() nounwind {
+define ptr @f4() nounwind {
 entry:
   %x = alloca [32 x i8], align 1
-  %0 = call i8* @llvm.eh.dwarf.cfa(i32 0)
-  ret i8* %0
+  %0 = call ptr @llvm.eh.dwarf.cfa(i32 0)
+  ret ptr %0
 
 ; CHECK-LABEL: f4:
 

diff --git a/llvm/test/CodeGen/Mips/eh-return32.ll b/llvm/test/CodeGen/Mips/eh-return32.ll
index a11a43cb406e7..983fc6f7788c7 100644
--- a/llvm/test/CodeGen/Mips/eh-return32.ll
+++ b/llvm/test/CodeGen/Mips/eh-return32.ll
@@ -2,13 +2,13 @@
 ; RUN: llc -march=mipsel -mcpu=mips32r2 -asm-show-inst -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,NOT-R6
 ; RUN: llc -march=mipsel -mcpu=mips32r6 -asm-show-inst -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,R6
 
-declare void @llvm.eh.return.i32(i32, i8*)
+declare void @llvm.eh.return.i32(i32, ptr)
 declare void @foo(...)
 
-define i8* @f1(i32 %offset, i8* %handler) {
+define ptr @f1(i32 %offset, ptr %handler) {
 entry:
   call void (...) @foo()
-  call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
+  call void @llvm.eh.return.i32(i32 %offset, ptr %handler)
   unreachable
 
 ; CHECK:    f1:
@@ -48,9 +48,9 @@ entry:
 ; CHECK:        addu    $sp, $sp, $3
 }
 
-define i8* @f2(i32 %offset, i8* %handler) {
+define ptr @f2(i32 %offset, ptr %handler) {
 entry:
-  call void @llvm.eh.return.i32(i32 %offset, i8* %handler)
+  call void @llvm.eh.return.i32(i32 %offset, ptr %handler)
   unreachable
 
 ; CHECK:    f2:

diff --git a/llvm/test/CodeGen/Mips/eh-return64.ll b/llvm/test/CodeGen/Mips/eh-return64.ll
index 496e3abcf9c50..9ae2f00d46c1e 100644
--- a/llvm/test/CodeGen/Mips/eh-return64.ll
+++ b/llvm/test/CodeGen/Mips/eh-return64.ll
@@ -3,13 +3,13 @@
 ; RUN: llc -march=mips64el -mcpu=mips64r2 -asm-show-inst -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,NOT-R6
 ; RUN: llc -march=mips64el -mcpu=mips64r6 -asm-show-inst -relocation-model=pic < %s | FileCheck %s -check-prefixes=CHECK,R6
 
-declare void @llvm.eh.return.i64(i64, i8*)
+declare void @llvm.eh.return.i64(i64, ptr)
 declare void @foo(...)
 
-define void @f1(i64 %offset, i8* %handler) {
+define void @f1(i64 %offset, ptr %handler) {
 entry:
   call void (...) @foo()
-  call void @llvm.eh.return.i64(i64 %offset, i8* %handler)
+  call void @llvm.eh.return.i64(i64 %offset, ptr %handler)
   unreachable
 
 ; CHECK:    f1:
@@ -49,9 +49,9 @@ entry:
 ; CHECK:        daddu   $sp, $sp, $3
 }
 
-define void @f2(i64 %offset, i8* %handler) {
+define void @f2(i64 %offset, ptr %handler) {
 entry:
-  call void @llvm.eh.return.i64(i64 %offset, i8* %handler)
+  call void @llvm.eh.return.i64(i64 %offset, ptr %handler)
   unreachable
 
 ; CHECK:    f2:

diff --git a/llvm/test/CodeGen/Mips/eh.ll b/llvm/test/CodeGen/Mips/eh.ll
index 2f843d9da9a61..217059b477313 100644
--- a/llvm/test/CodeGen/Mips/eh.ll
+++ b/llvm/test/CodeGen/Mips/eh.ll
@@ -2,9 +2,9 @@
 ; RUN: llc  < %s -march=mips   | FileCheck %s -check-prefix=CHECK-EB
 
 @g1 = global double 0.000000e+00, align 8
-@_ZTId = external constant i8*
+@_ZTId = external constant ptr
 
-define void @_Z1fd(double %i2) personality i32 (...)* @__gxx_personality_v0 {
+define void @_Z1fd(double %i2) personality ptr @__gxx_personality_v0 {
 entry:
 ; CHECK-EL:  addiu $sp, $sp
 ; CHECK-EL:  .cfi_def_cfa_offset
@@ -16,49 +16,47 @@ entry:
 ; CHECK-EB:  .cfi_offset 52, -4
 ; CHECK-EL:  .cfi_offset 31, -12
 
-  %exception = tail call i8* @__cxa_allocate_exception(i32 8) nounwind
-  %0 = bitcast i8* %exception to double*
-  store double 3.200000e+00, double* %0, align 8
-  invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTId to i8*), i8* null) noreturn
+  %exception = tail call ptr @__cxa_allocate_exception(i32 8) nounwind
+  store double 3.200000e+00, ptr %exception, align 8
+  invoke void @__cxa_throw(ptr %exception, ptr @_ZTId, ptr null) noreturn
           to label %unreachable unwind label %lpad
 
 lpad:                                             ; preds = %entry
 ; CHECK-EL:  # %lpad
 ; CHECK-EL:  bne $5
 
-  %exn.val = landingpad { i8*, i32 }
+  %exn.val = landingpad { ptr, i32 }
            cleanup
-           catch i8* bitcast (i8** @_ZTId to i8*)
-  %exn = extractvalue { i8*, i32 } %exn.val, 0
-  %sel = extractvalue { i8*, i32 } %exn.val, 1
-  %1 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTId to i8*)) nounwind
-  %2 = icmp eq i32 %sel, %1
-  br i1 %2, label %catch, label %eh.resume
+           catch ptr @_ZTId
+  %exn = extractvalue { ptr, i32 } %exn.val, 0
+  %sel = extractvalue { ptr, i32 } %exn.val, 1
+  %0 = tail call i32 @llvm.eh.typeid.for(ptr @_ZTId) nounwind
+  %1 = icmp eq i32 %sel, %0
+  br i1 %1, label %catch, label %eh.resume
 
 catch:                                            ; preds = %lpad
-  %3 = tail call i8* @__cxa_begin_catch(i8* %exn) nounwind
-  %4 = bitcast i8* %3 to double*
-  %exn.scalar = load double, double* %4, align 8
+  %2 = tail call ptr @__cxa_begin_catch(ptr %exn) nounwind
+  %exn.scalar = load double, ptr %2, align 8
   %add = fadd double %exn.scalar, %i2
-  store double %add, double* @g1, align 8
+  store double %add, ptr @g1, align 8
   tail call void @__cxa_end_catch() nounwind
   ret void
 
 eh.resume:                                        ; preds = %lpad
-  resume { i8*, i32 } %exn.val
+  resume { ptr, i32 } %exn.val
 
 unreachable:                                      ; preds = %entry
   unreachable
 }
 
-declare i8* @__cxa_allocate_exception(i32)
+declare ptr @__cxa_allocate_exception(i32)
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i32 @llvm.eh.typeid.for(i8*) nounwind
+declare i32 @llvm.eh.typeid.for(ptr) nounwind
 
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare void @__cxa_throw(ptr, ptr, ptr)
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 
 declare void @__cxa_end_catch()

diff --git a/llvm/test/CodeGen/Mips/ehframe-indirect.ll b/llvm/test/CodeGen/Mips/ehframe-indirect.ll
index 98a2f389ed042..59f358316ddfd 100644
--- a/llvm/test/CodeGen/Mips/ehframe-indirect.ll
+++ b/llvm/test/CodeGen/Mips/ehframe-indirect.ll
@@ -15,9 +15,9 @@
 ; RUN: llc -mtriple=mips64-unknown-freebsd11.0 < %s -asm-verbose -relocation-model=pic | \
 ; RUN:     FileCheck -check-prefixes=ALL,FREEBSD,FREEBSD-NEW,N64 %s
 
-@_ZTISt9exception = external constant i8*
+@_ZTISt9exception = external constant ptr
 
-define i32 @main() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @main() personality ptr @__gxx_personality_v0 {
 ; ALL: .cfi_startproc
 
 ; Linux must rely on the assembler/linker converting the encodings.
@@ -36,9 +36,9 @@ entry:
 ; ALL: jalr
 
 lpad:
-  %0 = landingpad { i8*, i32 }
-    catch i8* null
-    catch i8* bitcast (i8** @_ZTISt9exception to i8*)
+  %0 = landingpad { ptr, i32 }
+    catch ptr null
+    catch ptr @_ZTISt9exception
   ret i32 0
 
 cont:

diff --git a/llvm/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll b/llvm/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll
index 042d003a82245..b1045a08dd3a6 100644
--- a/llvm/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll
+++ b/llvm/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll
@@ -6,7 +6,7 @@ target triple="mipsel--"
 ; emergency spill slot.  Filed PR48301.
 ; XFAIL: *
 @var = external global i32
-@ptrvar = external global i8*
+@ptrvar = external global ptr
 
 ; CHECK-LABEL: func:
 define void @func() {
@@ -14,52 +14,51 @@ define void @func() {
   %stackspace = alloca[16384 x i32], align 4
 
   ; ensure stackspace is not optimized out
-  %stackspace_casted = bitcast [16384 x i32]* %stackspace to i8*
-  store volatile i8* %stackspace_casted, i8** @ptrvar
+  store volatile ptr %stackspace, ptr @ptrvar
 
   ; Load values to increase register pressure.
-  %v0 = load volatile i32, i32* @var
-  %v1 = load volatile i32, i32* @var
-  %v2 = load volatile i32, i32* @var
-  %v3 = load volatile i32, i32* @var
-  %v4 = load volatile i32, i32* @var
-  %v5 = load volatile i32, i32* @var
-  %v6 = load volatile i32, i32* @var
-  %v7 = load volatile i32, i32* @var
-  %v8 = load volatile i32, i32* @var
-  %v9 = load volatile i32, i32* @var
-  %v10 = load volatile i32, i32* @var
-  %v11 = load volatile i32, i32* @var
-  %v12 = load volatile i32, i32* @var
-  %v13 = load volatile i32, i32* @var
-  %v14 = load volatile i32, i32* @var
-  %v15 = load volatile i32, i32* @var
-  %v16 = load volatile i32, i32* @var
+  %v0 = load volatile i32, ptr @var
+  %v1 = load volatile i32, ptr @var
+  %v2 = load volatile i32, ptr @var
+  %v3 = load volatile i32, ptr @var
+  %v4 = load volatile i32, ptr @var
+  %v5 = load volatile i32, ptr @var
+  %v6 = load volatile i32, ptr @var
+  %v7 = load volatile i32, ptr @var
+  %v8 = load volatile i32, ptr @var
+  %v9 = load volatile i32, ptr @var
+  %v10 = load volatile i32, ptr @var
+  %v11 = load volatile i32, ptr @var
+  %v12 = load volatile i32, ptr @var
+  %v13 = load volatile i32, ptr @var
+  %v14 = load volatile i32, ptr @var
+  %v15 = load volatile i32, ptr @var
+  %v16 = load volatile i32, ptr @var
 
   ; Computing a stack-relative values needs an additional register.
   ; We should get an emergency spill/reload for this.
   ; CHECK: sw ${{.*}}, 0($sp)
   ; CHECK: lw ${{.*}}, 0($sp)
-  store volatile i32 %v0, i32* %space
+  store volatile i32 %v0, ptr %space
 
   ; store values so they are used.
-  store volatile i32 %v0, i32* @var
-  store volatile i32 %v1, i32* @var
-  store volatile i32 %v2, i32* @var
-  store volatile i32 %v3, i32* @var
-  store volatile i32 %v4, i32* @var
-  store volatile i32 %v5, i32* @var
-  store volatile i32 %v6, i32* @var
-  store volatile i32 %v7, i32* @var
-  store volatile i32 %v8, i32* @var
-  store volatile i32 %v9, i32* @var
-  store volatile i32 %v10, i32* @var
-  store volatile i32 %v11, i32* @var
-  store volatile i32 %v12, i32* @var
-  store volatile i32 %v13, i32* @var
-  store volatile i32 %v14, i32* @var
-  store volatile i32 %v15, i32* @var
-  store volatile i32 %v16, i32* @var
+  store volatile i32 %v0, ptr @var
+  store volatile i32 %v1, ptr @var
+  store volatile i32 %v2, ptr @var
+  store volatile i32 %v3, ptr @var
+  store volatile i32 %v4, ptr @var
+  store volatile i32 %v5, ptr @var
+  store volatile i32 %v6, ptr @var
+  store volatile i32 %v7, ptr @var
+  store volatile i32 %v8, ptr @var
+  store volatile i32 %v9, ptr @var
+  store volatile i32 %v10, ptr @var
+  store volatile i32 %v11, ptr @var
+  store volatile i32 %v12, ptr @var
+  store volatile i32 %v13, ptr @var
+  store volatile i32 %v14, ptr @var
+  store volatile i32 %v15, ptr @var
+  store volatile i32 %v16, ptr @var
 
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/emit-big-cst.ll b/llvm/test/CodeGen/Mips/emit-big-cst.ll
index 679824ef047b6..cd0666cfd3fc2 100644
--- a/llvm/test/CodeGen/Mips/emit-big-cst.ll
+++ b/llvm/test/CodeGen/Mips/emit-big-cst.ll
@@ -38,21 +38,19 @@
 
 @bigCst = internal constant i82 483673642326615442599424
 
-define void @accessBig(i64* %storage) {
-  %addr = bitcast i64* %storage to i82*
-  %bigLoadedCst = load volatile i82, i82* @bigCst
+define void @accessBig(ptr %storage) {
+  %bigLoadedCst = load volatile i82, ptr @bigCst
   %tmp = add i82 %bigLoadedCst, 1
-  store i82 %tmp, i82* %addr
+  store i82 %tmp, ptr %storage
   ret void
 }
 
 @notSoBigCst = internal constant i57 72057594037927935
 
-define void @accessNotSoBig(i64* %storage) {
-  %addr = bitcast i64* %storage to i57*
-  %bigLoadedCst = load volatile i57, i57* @notSoBigCst
+define void @accessNotSoBig(ptr %storage) {
+  %bigLoadedCst = load volatile i57, ptr @notSoBigCst
   %tmp = add i57 %bigLoadedCst, 1
-  store i57 %tmp, i57* %addr
+  store i57 %tmp, ptr %storage
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/emutls_generic.ll b/llvm/test/CodeGen/Mips/emutls_generic.ll
index a529646b88fc3..344a581d6b4b7 100644
--- a/llvm/test/CodeGen/Mips/emutls_generic.ll
+++ b/llvm/test/CodeGen/Mips/emutls_generic.ll
@@ -14,19 +14,19 @@
 @external_y = thread_local global i8 7, align 2
 @internal_y = internal thread_local global i64 9, align 16
 
-define i32* @get_external_x() {
+define ptr @get_external_x() {
 entry:
-  ret i32* @external_x
+  ret ptr @external_x
 }
 
-define i8* @get_external_y() {
+define ptr @get_external_y() {
 entry:
-  ret i8* @external_y
+  ret ptr @external_y
 }
 
-define i64* @get_internal_y() {
+define ptr @get_internal_y() {
 entry:
-  ret i64* @internal_y
+  ret ptr @internal_y
 }
 
 ; MIPS_32-LABEL: get_external_y:

diff --git a/llvm/test/CodeGen/Mips/ex2.ll b/llvm/test/CodeGen/Mips/ex2.ll
index 87fe77035ec26..79aabfcbbfc43 100644
--- a/llvm/test/CodeGen/Mips/ex2.ll
+++ b/llvm/test/CodeGen/Mips/ex2.ll
@@ -1,7 +1,7 @@
 ; RUN: llc  -march=mipsel -mattr=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
 
 @.str = private unnamed_addr constant [6 x i8] c"hello\00", align 1
-@_ZTIPKc = external constant i8*
+@_ZTIPKc = external constant ptr
 
 define i32 @main() {
 ; 16-LABEL: main:
@@ -14,18 +14,17 @@ define i32 @main() {
 ; 16:   .cfi_endproc
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
-  %exception = call i8* @__cxa_allocate_exception(i32 4) nounwind
-  %0 = bitcast i8* %exception to i8**
-  store i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i32 0, i32 0), i8** %0
-  call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIPKc to i8*), i8* null) noreturn
+  store i32 0, ptr %retval
+  %exception = call ptr @__cxa_allocate_exception(i32 4) nounwind
+  store ptr @.str, ptr %exception
+  call void @__cxa_throw(ptr %exception, ptr @_ZTIPKc, ptr null) noreturn
   unreachable
 
 return:                                           ; No predecessors!
-  %1 = load i32, i32* %retval
-  ret i32 %1
+  %0 = load i32, ptr %retval
+  ret i32 %0
 }
 
-declare i8* @__cxa_allocate_exception(i32)
+declare ptr @__cxa_allocate_exception(i32)
 
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare void @__cxa_throw(ptr, ptr, ptr)

diff --git a/llvm/test/CodeGen/Mips/extins.ll b/llvm/test/CodeGen/Mips/extins.ll
index 0b327a91bbfd2..9989c3c00dc79 100644
--- a/llvm/test/CodeGen/Mips/extins.ll
+++ b/llvm/test/CodeGen/Mips/extins.ll
@@ -10,15 +10,15 @@ entry:
   ret i32 %and
 }
 
-define void @ins2_5_9(i32 %s, i32* nocapture %d) nounwind {
+define void @ins2_5_9(i32 %s, ptr nocapture %d) nounwind {
 entry:
 ; 32R2: ins ${{[0-9]+}}, $4, 5, 9
 ; 16-NOT: ins ${{[0-9]+}}
   %and = shl i32 %s, 5
   %shl = and i32 %and, 16352
-  %tmp3 = load i32, i32* %d, align 4
+  %tmp3 = load i32, ptr %d, align 4
   %and5 = and i32 %tmp3, -16353
   %or = or i32 %and5, %shl
-  store i32 %or, i32* %d, align 4
+  store i32 %or, ptr %d, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/f16abs.ll b/llvm/test/CodeGen/Mips/f16abs.ll
index 847ef627b7efa..30ff458f5a463 100644
--- a/llvm/test/CodeGen/Mips/f16abs.ll
+++ b/llvm/test/CodeGen/Mips/f16abs.ll
@@ -11,14 +11,14 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
-  %0 = load double, double* @y, align 8
+  %0 = load double, ptr @y, align 8
   %call = tail call double @fabs(double %0) #2
-  store double %call, double* @x, align 8
+  store double %call, ptr @x, align 8
 ; static-NOT: 	.ent	__call_stub_fp_fabs
 ; static-NOT: 	jal fabs
-  %1 = load float, float* @y1, align 4
+  %1 = load float, ptr @y1, align 4
   %call2 = tail call float @fabsf(float %1) #2
-  store float %call2, float* @x1, align 4
+  store float %call2, ptr @x1, align 4
 ; static-NOT: 	.ent	__call_stub_fp_fabsf
 ; static-NOT: 	jal fabsf
   ret i32 0

diff --git a/llvm/test/CodeGen/Mips/f32-to-i64-single-float.ll b/llvm/test/CodeGen/Mips/f32-to-i64-single-float.ll
index 47d86f57e0c26..fd6063cea7f58 100644
--- a/llvm/test/CodeGen/Mips/f32-to-i64-single-float.ll
+++ b/llvm/test/CodeGen/Mips/f32-to-i64-single-float.ll
@@ -3,7 +3,7 @@
 ; This test casts a 32-bit float to a 64-bit int. This would cause a crash due
 ; to LLVM incorrectly lowering the float on single-float platforms.
 
-define void @foo(float* %in, i64* %out) {
+define void @foo(ptr %in, ptr %out) {
 ; CHECK-LABEL: foo:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    daddiu $sp, $sp, -16
@@ -39,14 +39,14 @@ define void @foo(float* %in, i64* %out) {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    daddiu $sp, $sp, 16
 entry:
-  %in.addr = alloca float*, align 8
-  %out.addr = alloca i64*, align 8
-  store float* %in, float** %in.addr, align 8
-  store i64* %out, i64** %out.addr, align 8
-  %0 = load float*, float** %in.addr, align 8
-  %1 = load float, float* %0, align 4
+  %in.addr = alloca ptr, align 8
+  %out.addr = alloca ptr, align 8
+  store ptr %in, ptr %in.addr, align 8
+  store ptr %out, ptr %out.addr, align 8
+  %0 = load ptr, ptr %in.addr, align 8
+  %1 = load float, ptr %0, align 4
   %conv = fptosi float %1 to i64
-  %2 = load i64*, i64** %out.addr, align 8
-  store i64 %conv, i64* %2, align 8
+  %2 = load ptr, ptr %out.addr, align 8
+  store i64 %conv, ptr %2, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/fastcc.ll b/llvm/test/CodeGen/Mips/fastcc.ll
index e48dee4721d84..ccdeedce6d235 100644
--- a/llvm/test/CodeGen/Mips/fastcc.ll
+++ b/llvm/test/CodeGen/Mips/fastcc.ll
@@ -108,23 +108,23 @@ entry:
 ; CHECK-NACL-NOT: lw  $15
 ; CHECK-NACL-NOT: lw  $24
 
-  %0 = load i32, i32* @gi0, align 4
-  %1 = load i32, i32* @gi1, align 4
-  %2 = load i32, i32* @gi2, align 4
-  %3 = load i32, i32* @gi3, align 4
-  %4 = load i32, i32* @gi4, align 4
-  %5 = load i32, i32* @gi5, align 4
-  %6 = load i32, i32* @gi6, align 4
-  %7 = load i32, i32* @gi7, align 4
-  %8 = load i32, i32* @gi8, align 4
-  %9 = load i32, i32* @gi9, align 4
-  %10 = load i32, i32* @gi10, align 4
-  %11 = load i32, i32* @gi11, align 4
-  %12 = load i32, i32* @gi12, align 4
-  %13 = load i32, i32* @gi13, align 4
-  %14 = load i32, i32* @gi14, align 4
-  %15 = load i32, i32* @gi15, align 4
-  %16 = load i32, i32* @gi16, align 4
+  %0 = load i32, ptr @gi0, align 4
+  %1 = load i32, ptr @gi1, align 4
+  %2 = load i32, ptr @gi2, align 4
+  %3 = load i32, ptr @gi3, align 4
+  %4 = load i32, ptr @gi4, align 4
+  %5 = load i32, ptr @gi5, align 4
+  %6 = load i32, ptr @gi6, align 4
+  %7 = load i32, ptr @gi7, align 4
+  %8 = load i32, ptr @gi8, align 4
+  %9 = load i32, ptr @gi9, align 4
+  %10 = load i32, ptr @gi10, align 4
+  %11 = load i32, ptr @gi11, align 4
+  %12 = load i32, ptr @gi12, align 4
+  %13 = load i32, ptr @gi13, align 4
+  %14 = load i32, ptr @gi14, align 4
+  %15 = load i32, ptr @gi15, align 4
+  %16 = load i32, ptr @gi16, align 4
   tail call fastcc void @callee0(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16)
   ret void
 }
@@ -151,23 +151,23 @@ entry:
 ; CHECK-NACL-NOT: sw  $15
 ; CHECK-NACL-NOT: sw  $24
 
-  store i32 %a0, i32* @g0, align 4
-  store i32 %a1, i32* @g1, align 4
-  store i32 %a2, i32* @g2, align 4
-  store i32 %a3, i32* @g3, align 4
-  store i32 %a4, i32* @g4, align 4
-  store i32 %a5, i32* @g5, align 4
-  store i32 %a6, i32* @g6, align 4
-  store i32 %a7, i32* @g7, align 4
-  store i32 %a8, i32* @g8, align 4
-  store i32 %a9, i32* @g9, align 4
-  store i32 %a10, i32* @g10, align 4
-  store i32 %a11, i32* @g11, align 4
-  store i32 %a12, i32* @g12, align 4
-  store i32 %a13, i32* @g13, align 4
-  store i32 %a14, i32* @g14, align 4
-  store i32 %a15, i32* @g15, align 4
-  store i32 %a16, i32* @g16, align 4
+  store i32 %a0, ptr @g0, align 4
+  store i32 %a1, ptr @g1, align 4
+  store i32 %a2, ptr @g2, align 4
+  store i32 %a3, ptr @g3, align 4
+  store i32 %a4, ptr @g4, align 4
+  store i32 %a5, ptr @g5, align 4
+  store i32 %a6, ptr @g6, align 4
+  store i32 %a7, ptr @g7, align 4
+  store i32 %a8, ptr @g8, align 4
+  store i32 %a9, ptr @g9, align 4
+  store i32 %a10, ptr @g10, align 4
+  store i32 %a11, ptr @g11, align 4
+  store i32 %a12, ptr @g12, align 4
+  store i32 %a13, ptr @g13, align 4
+  store i32 %a14, ptr @g14, align 4
+  store i32 %a15, ptr @g15, align 4
+  store i32 %a16, ptr @g16, align 4
   ret void
 }
 
@@ -195,27 +195,27 @@ entry:
 ; CHECK: lwc1  $f1
 ; CHECK: lwc1  $f0
 
-  %0 = load float, float* @gfa0, align 4
-  %1 = load float, float* @gfa1, align 4
-  %2 = load float, float* @gfa2, align 4
-  %3 = load float, float* @gfa3, align 4
-  %4 = load float, float* @gfa4, align 4
-  %5 = load float, float* @gfa5, align 4
-  %6 = load float, float* @gfa6, align 4
-  %7 = load float, float* @gfa7, align 4
-  %8 = load float, float* @gfa8, align 4
-  %9 = load float, float* @gfa9, align 4
-  %10 = load float, float* @gfa10, align 4
-  %11 = load float, float* @gfa11, align 4
-  %12 = load float, float* @gfa12, align 4
-  %13 = load float, float* @gfa13, align 4
-  %14 = load float, float* @gfa14, align 4
-  %15 = load float, float* @gfa15, align 4
-  %16 = load float, float* @gfa16, align 4
-  %17 = load float, float* @gfa17, align 4
-  %18 = load float, float* @gfa18, align 4
-  %19 = load float, float* @gfa19, align 4
-  %20 = load float, float* @gfa20, align 4
+  %0 = load float, ptr @gfa0, align 4
+  %1 = load float, ptr @gfa1, align 4
+  %2 = load float, ptr @gfa2, align 4
+  %3 = load float, ptr @gfa3, align 4
+  %4 = load float, ptr @gfa4, align 4
+  %5 = load float, ptr @gfa5, align 4
+  %6 = load float, ptr @gfa6, align 4
+  %7 = load float, ptr @gfa7, align 4
+  %8 = load float, ptr @gfa8, align 4
+  %9 = load float, ptr @gfa9, align 4
+  %10 = load float, ptr @gfa10, align 4
+  %11 = load float, ptr @gfa11, align 4
+  %12 = load float, ptr @gfa12, align 4
+  %13 = load float, ptr @gfa13, align 4
+  %14 = load float, ptr @gfa14, align 4
+  %15 = load float, ptr @gfa15, align 4
+  %16 = load float, ptr @gfa16, align 4
+  %17 = load float, ptr @gfa17, align 4
+  %18 = load float, ptr @gfa18, align 4
+  %19 = load float, ptr @gfa19, align 4
+  %20 = load float, ptr @gfa20, align 4
   tail call fastcc void @callee1(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13, float %14, float %15, float %16, float %17, float %18, float %19, float %20)
   ret void
 }
@@ -244,27 +244,27 @@ entry:
 ; CHECK-DAG: swc1  $f18
 ; CHECK-DAG: swc1  $f19
 
-  store float %a0, float* @gf0, align 4
-  store float %a1, float* @gf1, align 4
-  store float %a2, float* @gf2, align 4
-  store float %a3, float* @gf3, align 4
-  store float %a4, float* @gf4, align 4
-  store float %a5, float* @gf5, align 4
-  store float %a6, float* @gf6, align 4
-  store float %a7, float* @gf7, align 4
-  store float %a8, float* @gf8, align 4
-  store float %a9, float* @gf9, align 4
-  store float %a10, float* @gf10, align 4
-  store float %a11, float* @gf11, align 4
-  store float %a12, float* @gf12, align 4
-  store float %a13, float* @gf13, align 4
-  store float %a14, float* @gf14, align 4
-  store float %a15, float* @gf15, align 4
-  store float %a16, float* @gf16, align 4
-  store float %a17, float* @gf17, align 4
-  store float %a18, float* @gf18, align 4
-  store float %a19, float* @gf19, align 4
-  store float %a20, float* @gf20, align 4
+  store float %a0, ptr @gf0, align 4
+  store float %a1, ptr @gf1, align 4
+  store float %a2, ptr @gf2, align 4
+  store float %a3, ptr @gf3, align 4
+  store float %a4, ptr @gf4, align 4
+  store float %a5, ptr @gf5, align 4
+  store float %a6, ptr @gf6, align 4
+  store float %a7, ptr @gf7, align 4
+  store float %a8, ptr @gf8, align 4
+  store float %a9, ptr @gf9, align 4
+  store float %a10, ptr @gf10, align 4
+  store float %a11, ptr @gf11, align 4
+  store float %a12, ptr @gf12, align 4
+  store float %a13, ptr @gf13, align 4
+  store float %a14, ptr @gf14, align 4
+  store float %a15, ptr @gf15, align 4
+  store float %a16, ptr @gf16, align 4
+  store float %a17, ptr @gf17, align 4
+  store float %a18, ptr @gf18, align 4
+  store float %a19, ptr @gf19, align 4
+  store float %a20, ptr @gf20, align 4
   ret void
 }
 
@@ -291,17 +291,17 @@ entry:
 ; NOODDSPREG-DAG:    lwc1    $[[F0:f[0-9]*[02468]]], 40($[[R0]])
 ; NOODDSPREG-DAG:    swc1    $[[F0]], 0($sp)
 
-  %0 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4
-  %1 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 1), align 4
-  %2 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 2), align 4
-  %3 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 3), align 4
-  %4 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 4), align 4
-  %5 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 5), align 4
-  %6 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 6), align 4
-  %7 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 7), align 4
-  %8 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 8), align 4
-  %9 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 9), align 4
-  %10 = load float, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 10), align 4
+  %0 = load float, ptr @fa, align 4
+  %1 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 1), align 4
+  %2 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 2), align 4
+  %3 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 3), align 4
+  %4 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 4), align 4
+  %5 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 5), align 4
+  %6 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 6), align 4
+  %7 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 7), align 4
+  %8 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 8), align 4
+  %9 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 9), align 4
+  %10 = load float, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 10), align 4
   tail call fastcc void @callee2(float %0, float %1, float %2, float %3,
                                  float %4, float %5, float %6, float %7,
                                  float %8, float %9, float %10)
@@ -333,17 +333,17 @@ entry:
 ; NOODDSPREG-DAG:    lwc1    $[[F0:f[0-9]*[02468]]], {{[0-9]+}}($sp)
 ; NOODDSPREG-DAG:    swc1    $[[F0]], 40($[[R0]])
 
-  store float %a0, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 0), align 4
-  store float %a1, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 1), align 4
-  store float %a2, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 2), align 4
-  store float %a3, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 3), align 4
-  store float %a4, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 4), align 4
-  store float %a5, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 5), align 4
-  store float %a6, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 6), align 4
-  store float %a7, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 7), align 4
-  store float %a8, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 8), align 4
-  store float %a9, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 9), align 4
-  store float %a10, float* getelementptr ([11 x float], [11 x float]* @fa, i32 0, i32 10), align 4
+  store float %a0, ptr @fa, align 4
+  store float %a1, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 1), align 4
+  store float %a2, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 2), align 4
+  store float %a3, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 3), align 4
+  store float %a4, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 4), align 4
+  store float %a5, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 5), align 4
+  store float %a6, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 6), align 4
+  store float %a7, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 7), align 4
+  store float %a8, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 8), align 4
+  store float %a9, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 9), align 4
+  store float %a10, ptr getelementptr ([11 x float], ptr @fa, i32 0, i32 10), align 4
   ret void
 }
 
@@ -370,17 +370,17 @@ entry:
 ; FP64-NOODDSPREG-DAG:    ldc1    $[[F0:f[0-9]*[02468]]], 80($[[R0]])
 ; FP64-NOODDSPREG-DAG:    sdc1    $[[F0]], 0($sp)
 
-  %0 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8
-  %1 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 1), align 8
-  %2 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 2), align 8
-  %3 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 3), align 8
-  %4 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 4), align 8
-  %5 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 5), align 8
-  %6 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 6), align 8
-  %7 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 7), align 8
-  %8 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 8), align 8
-  %9 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 9), align 8
-  %10 = load double, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 10), align 8
+  %0 = load double, ptr @da, align 8
+  %1 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 1), align 8
+  %2 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 2), align 8
+  %3 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 3), align 8
+  %4 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 4), align 8
+  %5 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 5), align 8
+  %6 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 6), align 8
+  %7 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 7), align 8
+  %8 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 8), align 8
+  %9 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 9), align 8
+  %10 = load double, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 10), align 8
   tail call fastcc void @callee3(double %0, double %1, double %2, double %3,
                                  double %4, double %5, double %6, double %7,
                                  double %8, double %9, double %10)
@@ -413,16 +413,16 @@ entry:
 ; FP64-NOODDSPREG-DAG:    ldc1    $[[F0:f[0-9]*[02468]]], 0($sp)
 ; FP64-NOODDSPREG-DAG:    sdc1    $[[F0]], 80($[[R0]])
 
-  store double %a0, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 0), align 8
-  store double %a1, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 1), align 8
-  store double %a2, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 2), align 8
-  store double %a3, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 3), align 8
-  store double %a4, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 4), align 8
-  store double %a5, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 5), align 8
-  store double %a6, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 6), align 8
-  store double %a7, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 7), align 8
-  store double %a8, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 8), align 8
-  store double %a9, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 9), align 8
-  store double %a10, double* getelementptr ([11 x double], [11 x double]* @da, i32 0, i32 10), align 8
+  store double %a0, ptr @da, align 8
+  store double %a1, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 1), align 8
+  store double %a2, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 2), align 8
+  store double %a3, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 3), align 8
+  store double %a4, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 4), align 8
+  store double %a5, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 5), align 8
+  store double %a6, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 6), align 8
+  store double %a7, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 7), align 8
+  store double %a8, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 8), align 8
+  store double %a9, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 9), align 8
+  store double %a10, ptr getelementptr ([11 x double], ptr @da, i32 0, i32 10), align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/fastcc_byval.ll b/llvm/test/CodeGen/Mips/fastcc_byval.ll
index 135411021dc55..250f358d88fca 100644
--- a/llvm/test/CodeGen/Mips/fastcc_byval.ll
+++ b/llvm/test/CodeGen/Mips/fastcc_byval.ll
@@ -3,25 +3,24 @@
 ; Test that a load comes after a store to the same memory location when
 ; passing a byval parameter to a function that makes a fastcc function call.
 
-%struct.str = type { i32, i32, [3 x i32*] }
+%struct.str = type { i32, i32, [3 x ptr] }
 
-declare fastcc void @_Z1F3str(%struct.str* noalias nocapture sret(%struct.str) %agg.result, %struct.str* byval(%struct.str) nocapture readonly align 4 %s)
+declare fastcc void @_Z1F3str(ptr noalias nocapture sret(%struct.str) %agg.result, ptr byval(%struct.str) nocapture readonly align 4 %s)
 
-define i32 @_Z1g3str(%struct.str* byval(%struct.str) nocapture readonly align 4 %s) {
+define i32 @_Z1g3str(ptr byval(%struct.str) nocapture readonly align 4 %s) {
 ; CHECK-LABEL: _Z1g3str:
 ; CHECK: sw  $7, [[OFFSET:[0-9]+]]($sp)
 ; CHECK: lw  ${{[0-9]+}}, [[OFFSET]]($sp)
 entry:
   %ref.tmp = alloca %struct.str, align 4
-  %0 = bitcast %struct.str* %ref.tmp to i8*
-  call void @llvm.lifetime.start.p0i8(i64 20, i8* nonnull %0)
-  call fastcc void @_Z1F3str(%struct.str* nonnull sret(%struct.str) %ref.tmp, %struct.str* byval(%struct.str) nonnull align 4 %s)
-  %cl.sroa.3.0..sroa_idx2 = getelementptr inbounds %struct.str, %struct.str* %ref.tmp, i32 0, i32 1
-  %cl.sroa.3.0.copyload = load i32, i32* %cl.sroa.3.0..sroa_idx2, align 4
-  call void @llvm.lifetime.end.p0i8(i64 20, i8* nonnull %0)
+  call void @llvm.lifetime.start.p0(i64 20, ptr nonnull %ref.tmp)
+  call fastcc void @_Z1F3str(ptr nonnull sret(%struct.str) %ref.tmp, ptr byval(%struct.str) nonnull align 4 %s)
+  %cl.sroa.3.0..sroa_idx2 = getelementptr inbounds %struct.str, ptr %ref.tmp, i32 0, i32 1
+  %cl.sroa.3.0.copyload = load i32, ptr %cl.sroa.3.0..sroa_idx2, align 4
+  call void @llvm.lifetime.end.p0(i64 20, ptr nonnull %ref.tmp)
   ret i32 %cl.sroa.3.0.copyload
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
 
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)

diff  --git a/llvm/test/CodeGen/Mips/fixdfsf.ll b/llvm/test/CodeGen/Mips/fixdfsf.ll
index 5eb336bf64999..07b57f9f94825 100644
--- a/llvm/test/CodeGen/Mips/fixdfsf.ll
+++ b/llvm/test/CodeGen/Mips/fixdfsf.ll
@@ -7,9 +7,9 @@
 ; Function Attrs: nounwind optsize
 define void @foo()  {
 entry:
-  %0 = load double, double* @x, align 8
+  %0 = load double, ptr @x, align 8
   %conv = fptoui double %0 to i32
-  store i32 %conv, i32* @y, align 4
+  store i32 %conv, ptr @y, align 4
 ; pic1:	lw	${{[0-9]+}}, %call16(__fixunsdfsi)(${{[0-9]+}})
 ; pic2:	lw	${{[0-9]+}}, %got(__mips16_call_stub_2)(${{[0-9]+}})
   ret void

diff  --git a/llvm/test/CodeGen/Mips/fp-contract.ll b/llvm/test/CodeGen/Mips/fp-contract.ll
index 3933831798f8c..f25e9f5258c77 100644
--- a/llvm/test/CodeGen/Mips/fp-contract.ll
+++ b/llvm/test/CodeGen/Mips/fp-contract.ll
@@ -12,28 +12,28 @@ declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>)
 declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>)
 declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>)
 
-define void @foo(<4 x float>* %agg.result, <4 x float>* %acc, <4 x float>* %a, <4 x float>* %b) {
+define void @foo(ptr %agg.result, ptr %acc, ptr %a, ptr %b) {
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %2 = call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
-  %3 = load <4 x float>, <4 x float>* %acc, align 16
+  %3 = load <4 x float>, ptr %acc, align 16
   %4 = call <4 x float> @llvm.mips.fadd.w(<4 x float> %3, <4 x float> %2)
-  store <4 x float> %4, <4 x float>* %agg.result, align 16
+  store <4 x float> %4, ptr %agg.result, align 16
   ret void
   ; CHECK-CONTRACT-OFF: fmul.w
   ; CHECK-CONTRACT-OFF: fadd.w
   ; CHECK-CONTRACT-FAST: fmadd.w
 }
 
-define void @boo(<4 x float>* %agg.result, <4 x float>* %acc, <4 x float>* %a, <4 x float>* %b) {
+define void @boo(ptr %agg.result, ptr %acc, ptr %a, ptr %b) {
 entry:
-  %0 = load <4 x float>, <4 x float>* %a, align 16
-  %1 = load <4 x float>, <4 x float>* %b, align 16
+  %0 = load <4 x float>, ptr %a, align 16
+  %1 = load <4 x float>, ptr %b, align 16
   %2 = call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
-  %3 = load <4 x float>, <4 x float>* %acc, align 16
+  %3 = load <4 x float>, ptr %acc, align 16
   %4 = call <4 x float> @llvm.mips.fsub.w(<4 x float> %3, <4 x float> %2)
-  store <4 x float> %4, <4 x float>* %agg.result, align 16
+  store <4 x float> %4, ptr %agg.result, align 16
   ret void
   ; CHECK-CONTRACT-OFF: fmul.w
   ; CHECK-CONTRACT-OFF: fsub.w

diff  --git a/llvm/test/CodeGen/Mips/fp-indexed-ls.ll b/llvm/test/CodeGen/Mips/fp-indexed-ls.ll
index 87fb248e56fdb..48052b6e86143 100644
--- a/llvm/test/CodeGen/Mips/fp-indexed-ls.ll
+++ b/llvm/test/CodeGen/Mips/fp-indexed-ls.ll
@@ -19,7 +19,7 @@
 @s2 = external global [4 x %struct.S2]
 @s3 = external global %struct.S3
 
-define float @foo0(float* nocapture %b, i32 %o) nounwind readonly {
+define float @foo0(ptr nocapture %b, i32 %o) nounwind readonly {
 entry:
 ; ALL-LABEL: foo0:
 
@@ -45,12 +45,12 @@ entry:
 
 ; CHECK-NACL-NOT: lwxc1
 
-  %arrayidx = getelementptr inbounds float, float* %b, i32 %o
-  %0 = load float, float* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds float, ptr %b, i32 %o
+  %0 = load float, ptr %arrayidx, align 4
   ret float %0
 }
 
-define double @foo1(double* nocapture %b, i32 %o) nounwind readonly {
+define double @foo1(ptr nocapture %b, i32 %o) nounwind readonly {
 entry:
 ; ALL-LABEL: foo1:
 
@@ -76,8 +76,8 @@ entry:
 
 ; CHECK-NACL-NOT: ldxc1
 
-  %arrayidx = getelementptr inbounds double, double* %b, i32 %o
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %b, i32 %o
+  %0 = load double, ptr %arrayidx, align 8
   ret double %0
 }
 
@@ -100,12 +100,12 @@ entry:
 ; luxc1 was removed in MIPS64r6
 ; MIPS64R6-NOT:  luxc1
 
-  %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
-  %0 = load float, float* %arrayidx1, align 1
+  %arrayidx1 = getelementptr inbounds [4 x %struct.S], ptr @s, i32 0, i32 %b, i32 0, i32 %c
+  %0 = load float, ptr %arrayidx1, align 1
   ret float %0
 }
 
-define void @foo3(float* nocapture %b, i32 %o) nounwind {
+define void @foo3(ptr nocapture %b, i32 %o) nounwind {
 entry:
 ; ALL-LABEL: foo3:
 
@@ -129,13 +129,13 @@ entry:
 
 ; CHECK-NACL-NOT: swxc1
 
-  %0 = load float, float* @gf, align 4
-  %arrayidx = getelementptr inbounds float, float* %b, i32 %o
-  store float %0, float* %arrayidx, align 4
+  %0 = load float, ptr @gf, align 4
+  %arrayidx = getelementptr inbounds float, ptr %b, i32 %o
+  store float %0, ptr %arrayidx, align 4
   ret void
 }
 
-define void @foo4(double* nocapture %b, i32 %o) nounwind {
+define void @foo4(ptr nocapture %b, i32 %o) nounwind {
 entry:
 ; ALL-LABEL: foo4:
 
@@ -159,9 +159,9 @@ entry:
 
 ; CHECK-NACL-NOT: sdxc1
 
-  %0 = load double, double* @gd, align 8
-  %arrayidx = getelementptr inbounds double, double* %b, i32 %o
-  store double %0, double* %arrayidx, align 8
+  %0 = load double, ptr @gd, align 8
+  %arrayidx = getelementptr inbounds double, ptr %b, i32 %o
+  store double %0, ptr %arrayidx, align 8
   ret void
 }
 
@@ -179,9 +179,9 @@ entry:
 
 ; MIPS64R6-NOT:  suxc1
 
-  %0 = load float, float* @gf, align 4
-  %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
-  store float %0, float* %arrayidx1, align 1
+  %0 = load float, ptr @gf, align 4
+  %arrayidx1 = getelementptr inbounds [4 x %struct.S], ptr @s, i32 0, i32 %b, i32 0, i32 %c
+  store float %0, ptr %arrayidx1, align 1
   ret void
 }
 
@@ -199,8 +199,8 @@ entry:
 
 ; MIPS64R6-NOT:  luxc1
 
-  %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
-  %0 = load double, double* %arrayidx1, align 1
+  %arrayidx1 = getelementptr inbounds [4 x %struct.S2], ptr @s2, i32 0, i32 %b, i32 0, i32 %c
+  %0 = load double, ptr %arrayidx1, align 1
   ret double %0
 }
 
@@ -218,9 +218,9 @@ entry:
 
 ; MIPS64R6-NOT:  suxc1
 
-  %0 = load double, double* @gd, align 8
-  %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
-  store double %0, double* %arrayidx1, align 1
+  %0 = load double, ptr @gd, align 8
+  %arrayidx1 = getelementptr inbounds [4 x %struct.S2], ptr @s2, i32 0, i32 %b, i32 0, i32 %c
+  store double %0, ptr %arrayidx1, align 1
   ret void
 }
 
@@ -238,7 +238,7 @@ entry:
 
 ; MIPS64R6-NOT:  luxc1
 
-  %0 = load float, float* getelementptr inbounds (%struct.S3, %struct.S3* @s3, i32 0, i32 1), align 1
+  %0 = load float, ptr getelementptr inbounds (%struct.S3, ptr @s3, i32 0, i32 1), align 1
   ret float %0
 }
 
@@ -256,7 +256,7 @@ entry:
 
 ; MIPS64R6-NOT:  suxc1
 
-  store float %f, float* getelementptr inbounds (%struct.S3, %struct.S3* @s3, i32 0, i32 1), align 1
+  store float %f, ptr getelementptr inbounds (%struct.S3, ptr @s3, i32 0, i32 1), align 1
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/fp-spill-reload.ll b/llvm/test/CodeGen/Mips/fp-spill-reload.ll
index 431389ae9acb0..21a7ed040f0fd 100644
--- a/llvm/test/CodeGen/Mips/fp-spill-reload.ll
+++ b/llvm/test/CodeGen/Mips/fp-spill-reload.ll
@@ -1,34 +1,34 @@
 ; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s
 ; Check that $fp is not reserved.
 
-define void @foo0(i32* nocapture %b) nounwind {
+define void @foo0(ptr nocapture %b) nounwind {
 entry:
 ; CHECK: sw  $fp
 ; CHECK: lw  $fp
-  %0 = load i32, i32* %b, align 4
-  %arrayidx.1 = getelementptr inbounds i32, i32* %b, i32 1
-  %1 = load i32, i32* %arrayidx.1, align 4
+  %0 = load i32, ptr %b, align 4
+  %arrayidx.1 = getelementptr inbounds i32, ptr %b, i32 1
+  %1 = load i32, ptr %arrayidx.1, align 4
   %add.1 = add nsw i32 %1, 1
-  %arrayidx.2 = getelementptr inbounds i32, i32* %b, i32 2
-  %2 = load i32, i32* %arrayidx.2, align 4
+  %arrayidx.2 = getelementptr inbounds i32, ptr %b, i32 2
+  %2 = load i32, ptr %arrayidx.2, align 4
   %add.2 = add nsw i32 %2, 2
-  %arrayidx.3 = getelementptr inbounds i32, i32* %b, i32 3
-  %3 = load i32, i32* %arrayidx.3, align 4
+  %arrayidx.3 = getelementptr inbounds i32, ptr %b, i32 3
+  %3 = load i32, ptr %arrayidx.3, align 4
   %add.3 = add nsw i32 %3, 3
-  %arrayidx.4 = getelementptr inbounds i32, i32* %b, i32 4
-  %4 = load i32, i32* %arrayidx.4, align 4
+  %arrayidx.4 = getelementptr inbounds i32, ptr %b, i32 4
+  %4 = load i32, ptr %arrayidx.4, align 4
   %add.4 = add nsw i32 %4, 4
-  %arrayidx.5 = getelementptr inbounds i32, i32* %b, i32 5
-  %5 = load i32, i32* %arrayidx.5, align 4
+  %arrayidx.5 = getelementptr inbounds i32, ptr %b, i32 5
+  %5 = load i32, ptr %arrayidx.5, align 4
   %add.5 = add nsw i32 %5, 5
-  %arrayidx.6 = getelementptr inbounds i32, i32* %b, i32 6
-  %6 = load i32, i32* %arrayidx.6, align 4
+  %arrayidx.6 = getelementptr inbounds i32, ptr %b, i32 6
+  %6 = load i32, ptr %arrayidx.6, align 4
   %add.6 = add nsw i32 %6, 6
-  %arrayidx.7 = getelementptr inbounds i32, i32* %b, i32 7
-  %7 = load i32, i32* %arrayidx.7, align 4
+  %arrayidx.7 = getelementptr inbounds i32, ptr %b, i32 7
+  %7 = load i32, ptr %arrayidx.7, align 4
   %add.7 = add nsw i32 %7, 7
   call void @foo2(i32 %0, i32 %add.1, i32 %add.2, i32 %add.3, i32 %add.4, i32 %add.5, i32 %add.6, i32 %add.7) nounwind
-  call void bitcast (void (...)* @foo1 to void ()*)() nounwind
+  call void @foo1() nounwind
   call void @foo2(i32 %0, i32 %add.1, i32 %add.2, i32 %add.3, i32 %add.4, i32 %add.5, i32 %add.6, i32 %add.7) nounwind
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/fp16-promote.ll b/llvm/test/CodeGen/Mips/fp16-promote.ll
index e3d3a0a3d12df..6cbfab5f77669 100644
--- a/llvm/test/CodeGen/Mips/fp16-promote.ll
+++ b/llvm/test/CodeGen/Mips/fp16-promote.ll
@@ -5,18 +5,18 @@
 ; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
 ; CHECK-LIBCALL-DAG: add.s
 ; CHECK-LIBCALL-DAG: %call16(__gnu_f2h_ieee)
-define void @test_fadd(half* %p, half* %q) #0 {
-  %a = load half, half* %p, align 2
-  %b = load half, half* %q, align 2
+define void @test_fadd(ptr %p, ptr %q) #0 {
+  %a = load half, ptr %p, align 2
+  %b = load half, ptr %q, align 2
   %r = fadd half %a, %b
-  store half %r, half* %p
+  store half %r, ptr %p
   ret void
 }
 
 ; CHECK-LIBCALL-LABEL: test_fpext_float:
 ; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
-define float @test_fpext_float(half* %p) {
-  %a = load half, half* %p, align 2
+define float @test_fpext_float(ptr %p) {
+  %a = load half, ptr %p, align 2
   %r = fpext half %a to float
   ret float %r
 }
@@ -24,25 +24,25 @@ define float @test_fpext_float(half* %p) {
 ; CHECK-LIBCALL-LABEL: test_fpext_double:
 ; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
 ; CHECK-LIBCALL: cvt.d.s
-define double @test_fpext_double(half* %p) {
-  %a = load half, half* %p, align 2
+define double @test_fpext_double(ptr %p) {
+  %a = load half, ptr %p, align 2
   %r = fpext half %a to double
   ret double %r
 }
 
 ; CHECK-LIBCALL-LABEL: test_fptrunc_float:
 ; CHECK-LIBCALL: %call16(__gnu_f2h_ieee)
-define void @test_fptrunc_float(float %f, half* %p) #0 {
+define void @test_fptrunc_float(float %f, ptr %p) #0 {
   %a = fptrunc float %f to half
-  store half %a, half* %p
+  store half %a, ptr %p
   ret void
 }
 
 ; CHECK-LIBCALL-LABEL: test_fptrunc_double:
 ; CHECK-LIBCALL: %call16(__truncdfhf2)
-define void @test_fptrunc_double(double %d, half* %p) #0 {
+define void @test_fptrunc_double(double %d, ptr %p) #0 {
   %a = fptrunc double %d to half
-  store half %a, half* %p
+  store half %a, ptr %p
   ret void
 }
 
@@ -51,8 +51,8 @@ define void @test_fptrunc_double(double %d, half* %p) #0 {
 ; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
 ; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
 ; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
-define <4 x float> @test_vec_fpext_float(<4 x half>* %p) #0 {
-  %a = load <4 x half>, <4 x half>* %p, align 8
+define <4 x float> @test_vec_fpext_float(ptr %p) #0 {
+  %a = load <4 x half>, ptr %p, align 8
   %b = fpext <4 x half> %a to <4 x float>
   ret <4 x float> %b
 }
@@ -68,8 +68,8 @@ define <4 x float> @test_vec_fpext_float(<4 x half>* %p) #0 {
 ; CHECK-LIBCALL: cvt.d.s
 ; CHECK-LIBCALL: %call16(__gnu_h2f_ieee)
 ; CHECK-LIBCALL: cvt.d.s
-define <4 x double> @test_vec_fpext_double(<4 x half>* %p) #0 {
-  %a = load <4 x half>, <4 x half>* %p, align 8
+define <4 x double> @test_vec_fpext_double(ptr %p) #0 {
+  %a = load <4 x half>, ptr %p, align 8
   %b = fpext <4 x half> %a to <4 x double>
   ret <4 x double> %b
 }
@@ -79,9 +79,9 @@ define <4 x double> @test_vec_fpext_double(<4 x half>* %p) #0 {
 ; CHECK-LIBCALL: %call16(__gnu_f2h_ieee)
 ; CHECK-LIBCALL: %call16(__gnu_f2h_ieee)
 ; CHECK-LIBCALL: %call16(__gnu_f2h_ieee)
-define void @test_vec_fptrunc_float(<4 x float> %a, <4 x half>* %p) #0 {
+define void @test_vec_fptrunc_float(<4 x float> %a, ptr %p) #0 {
   %b = fptrunc <4 x float> %a to <4 x half>
-  store <4 x half> %b, <4 x half>* %p, align 8
+  store <4 x half> %b, ptr %p, align 8
   ret void
 }
 
@@ -90,9 +90,9 @@ define void @test_vec_fptrunc_float(<4 x float> %a, <4 x half>* %p) #0 {
 ; CHECK-LIBCALL: %call16(__truncdfhf2)
 ; CHECK-LIBCALL: %call16(__truncdfhf2)
 ; CHECK-LIBCALL: %call16(__truncdfhf2)
-define void @test_vec_fptrunc_double(<4 x double> %a, <4 x half>* %p) #0 {
+define void @test_vec_fptrunc_double(<4 x double> %a, ptr %p) #0 {
   %b = fptrunc <4 x double> %a to <4 x half>
-  store <4 x half> %b, <4 x half>* %p, align 8
+  store <4 x half> %b, ptr %p, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/fp16instrinsmc.ll b/llvm/test/CodeGen/Mips/fp16instrinsmc.ll
index b3d36ba29624f..168b042f4d4aa 100644
--- a/llvm/test/CodeGen/Mips/fp16instrinsmc.ll
+++ b/llvm/test/CodeGen/Mips/fp16instrinsmc.ll
@@ -23,10 +23,10 @@ define void @foo1() #0 {
 ; fmask: .set	reorder
 ; fmask: .end	foo1
 entry:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @one, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @one, align 4
   %call = call float @copysignf(float %0, float %1) #2
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -39,10 +39,10 @@ define void @foo2() #0 {
 ; fmask:	save	{{.*}}
 ; fmask:	.end	foo2
 entry:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @negone, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @negone, align 4
   %call = call float @copysignf(float %0, float %1) #2
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -57,11 +57,11 @@ entry:
 ; fmask: .set	macro
 ; fmask: .set	reorder
 ; fmask: .end	foo3
-  %0 = load double, double* @xd, align 8
-  %1 = load float, float* @oned, align 4
+  %0 = load double, ptr @xd, align 8
+  %1 = load float, ptr @oned, align 4
   %conv = fpext float %1 to double
   %call = call double @copysign(double %0, double %conv) #2
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -74,19 +74,19 @@ entry:
 ; fmask:	.ent	foo4
 ; fmask:	save	{{.*}}
 ; fmask:	.end	foo4
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @negoned, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @negoned, align 8
   %call = call double @copysign(double %0, double %1) #2
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @foo5() #0 {
 entry:
-  %0 = load float, float* @xn, align 4
+  %0 = load float, ptr @xn, align 4
   %call = call float @fabsf(float %0) #2
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -96,9 +96,9 @@ declare float @fabsf(float) #1
 ; Function Attrs: nounwind
 define void @foo6() #0 {
 entry:
-  %0 = load double, double* @xdn, align 8
+  %0 = load double, ptr @xdn, align 8
   %call = call double @fabs(double %0) #2
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -108,11 +108,11 @@ declare double @fabs(double) #1
 ; Function Attrs: nounwind
 define void @foo7() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @sinf(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(sinf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -122,11 +122,11 @@ declare float @sinf(float) #0
 ; Function Attrs: nounwind
 define void @foo8() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @sin(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(sin)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -136,11 +136,11 @@ declare double @sin(double) #0
 ; Function Attrs: nounwind
 define void @foo9() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @cosf(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(cosf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -150,11 +150,11 @@ declare float @cosf(float) #0
 ; Function Attrs: nounwind
 define void @foo10() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @cos(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(cos)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -164,11 +164,11 @@ declare double @cos(double) #0
 ; Function Attrs: nounwind
 define void @foo11() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @sqrtf(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(sqrtf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -178,11 +178,11 @@ declare float @sqrtf(float) #0
 ; Function Attrs: nounwind
 define void @foo12() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @sqrt(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(sqrt)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -192,11 +192,11 @@ declare double @sqrt(double) #0
 ; Function Attrs: nounwind
 define void @foo13() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @floorf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(floorf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -206,11 +206,11 @@ declare float @floorf(float) #1
 ; Function Attrs: nounwind
 define void @foo14() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @floor(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(floor)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -220,11 +220,11 @@ declare double @floor(double) #1
 ; Function Attrs: nounwind
 define void @foo15() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @nearbyintf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(nearbyintf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -234,11 +234,11 @@ declare float @nearbyintf(float) #1
 ; Function Attrs: nounwind
 define void @foo16() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @nearbyint(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(nearbyint)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -248,11 +248,11 @@ declare double @nearbyint(double) #1
 ; Function Attrs: nounwind
 define void @foo17() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @ceilf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(ceilf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -262,11 +262,11 @@ declare float @ceilf(float) #1
 ; Function Attrs: nounwind
 define void @foo18() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @ceil(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(ceil)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -276,11 +276,11 @@ declare double @ceil(double) #1
 ; Function Attrs: nounwind
 define void @foo19() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @rintf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(rintf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -290,11 +290,11 @@ declare float @rintf(float) #1
 ; Function Attrs: nounwind
 define void @foo20() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @rint(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(rint)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -304,11 +304,11 @@ declare double @rint(double) #1
 ; Function Attrs: nounwind
 define void @foo21() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @truncf(float %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(truncf)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -318,11 +318,11 @@ declare float @truncf(float) #1
 ; Function Attrs: nounwind
 define void @foo22() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @trunc(double %0) #2
 ;pic:	lw	${{[0-9]+}}, %call16(trunc)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -332,11 +332,11 @@ declare double @trunc(double) #1
 ; Function Attrs: nounwind
 define void @foo23() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @log2f(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(log2f)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -346,11 +346,11 @@ declare float @log2f(float) #0
 ; Function Attrs: nounwind
 define void @foo24() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @log2(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(log2)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 
@@ -360,11 +360,11 @@ declare double @log2(double) #0
 ; Function Attrs: nounwind
 define void @foo25() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @exp2f(float %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(exp2f)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}})
-  store float %call, float* @y, align 4
+  store float %call, ptr @y, align 4
   ret void
 }
 
@@ -374,11 +374,11 @@ declare float @exp2f(float) #0
 ; Function Attrs: nounwind
 define void @foo26() #0 {
 entry:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %call = call double @exp2(double %0) #3
 ;pic:	lw	${{[0-9]+}}, %call16(exp2)(${{[0-9]+}})
 ;pic:	lw	${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}})
-  store double %call, double* @yd, align 8
+  store double %call, ptr @yd, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/fp16static.ll b/llvm/test/CodeGen/Mips/fp16static.ll
index 730cce7629185..c598f7fac9a27 100644
--- a/llvm/test/CodeGen/Mips/fp16static.ll
+++ b/llvm/test/CodeGen/Mips/fp16static.ll
@@ -4,10 +4,10 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @x, align 4
   %mul = fmul float %0, %1
-  store float %mul, float* @x, align 4
+  store float %mul, ptr @x, align 4
 ; CHECK-STATIC16: jal	__mips16_mulsf3
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/fpneeded.ll b/llvm/test/CodeGen/Mips/fpneeded.ll
index a73f3b9204f3e..cc82f81a22285 100644
--- a/llvm/test/CodeGen/Mips/fpneeded.ll
+++ b/llvm/test/CodeGen/Mips/fpneeded.ll
@@ -40,7 +40,7 @@ entry:
 define void @vf(float %x) #0 {
 entry:
   %x.addr = alloca float, align 4
-  store float %x, float* %x.addr, align 4
+  store float %x, ptr %x.addr, align 4
   ret void
 }
 
@@ -58,7 +58,7 @@ entry:
 define void @vd(double %x) #0 {
 entry:
   %x.addr = alloca double, align 8
-  store double %x, double* %x.addr, align 8
+  store double %x, ptr %x.addr, align 8
   ret void
 }
 
@@ -75,11 +75,11 @@ entry:
 
 define void @foo1() #0 {
 entry:
-  store float 1.000000e+00, float* @zz, align 4
-  %0 = load float, float* @y, align 4
-  %1 = load float, float* @x, align 4
+  store float 1.000000e+00, ptr @zz, align 4
+  %0 = load float, ptr @y, align 4
+  %1 = load float, ptr @x, align 4
   %add = fadd float %0, %1
-  store float %add, float* @z, align 4
+  store float %add, ptr @z, align 4
   ret void
 }
 
@@ -96,7 +96,7 @@ entry:
 
 define void @foo2() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   call void @vf(float %0)
   ret void
 }
@@ -116,7 +116,7 @@ entry:
 define void @foo3() #0 {
 entry:
   %call = call float @fv()
-  store float %call, float* @x, align 4
+  store float %call, ptr @x, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/fpnotneeded.ll b/llvm/test/CodeGen/Mips/fpnotneeded.ll
index 59f01e25fab7d..d21bd53e949d2 100644
--- a/llvm/test/CodeGen/Mips/fpnotneeded.ll
+++ b/llvm/test/CodeGen/Mips/fpnotneeded.ll
@@ -19,7 +19,7 @@ entry:
 
 define i32 @iv() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   ret i32 %0
 }
 
@@ -34,8 +34,8 @@ define void @vif(i32 %i, float %f) #0 {
 entry:
   %i.addr = alloca i32, align 4
   %f.addr = alloca float, align 4
-  store i32 %i, i32* %i.addr, align 4
-  store float %f, float* %f.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  store float %f, ptr %f.addr, align 4
   ret void
 }
 
@@ -48,7 +48,7 @@ entry:
 
 define void @foo() #0 {
 entry:
-  store float 2.000000e+00, float* @f, align 4
+  store float 2.000000e+00, ptr @f, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/frame-address-err.ll b/llvm/test/CodeGen/Mips/frame-address-err.ll
index 086f62876a317..45f20ac8119c0 100644
--- a/llvm/test/CodeGen/Mips/frame-address-err.ll
+++ b/llvm/test/CodeGen/Mips/frame-address-err.ll
@@ -1,11 +1,11 @@
 ; RUN: not llc -march=mips < %s 2>&1 | FileCheck %s
 
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone
 
-define i8* @f() nounwind {
+define ptr @f() nounwind {
 entry:
-  %0 = call i8* @llvm.frameaddress(i32 1)
-  ret i8* %0
+  %0 = call ptr @llvm.frameaddress(i32 1)
+  ret ptr %0
 
 ; CHECK: error: return address can be determined only for current frame
 }

diff  --git a/llvm/test/CodeGen/Mips/frame-address.ll b/llvm/test/CodeGen/Mips/frame-address.ll
index f7ceb575c65ae..685d1fe1f4651 100644
--- a/llvm/test/CodeGen/Mips/frame-address.ll
+++ b/llvm/test/CodeGen/Mips/frame-address.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=mipsel < %s | FileCheck %s
 
-declare i8* @llvm.frameaddress(i32) nounwind readnone
+declare ptr @llvm.frameaddress(i32) nounwind readnone
 
-define i8* @f() nounwind uwtable {
+define ptr @f() nounwind uwtable {
 ; CHECK-LABEL: f:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addiu $sp, $sp, -8
@@ -21,6 +21,6 @@ define i8* @f() nounwind uwtable {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    addiu $sp, $sp, 8
 entry:
-  %0 = call i8* @llvm.frameaddress(i32 0)
-  ret i8* %0
+  %0 = call ptr @llvm.frameaddress(i32 0)
+  ret ptr %0
 }

diff  --git a/llvm/test/CodeGen/Mips/frameindex.ll b/llvm/test/CodeGen/Mips/frameindex.ll
index 3f436312e74f7..cee9777683cfa 100644
--- a/llvm/test/CodeGen/Mips/frameindex.ll
+++ b/llvm/test/CodeGen/Mips/frameindex.ll
@@ -13,8 +13,8 @@
 define i32 @k() {
 entry:
   %h = alloca i32, align 4
-  %call = call i32 @g(i32* %h)
+  %call = call i32 @g(ptr %h)
   ret i32 %call
 }
 
-declare i32 @g(i32*)
+declare i32 @g(ptr)

diff  --git a/llvm/test/CodeGen/Mips/global-address.ll b/llvm/test/CodeGen/Mips/global-address.ll
index 8142f0bc7df3d..88b5029af95bc 100644
--- a/llvm/test/CodeGen/Mips/global-address.ll
+++ b/llvm/test/CodeGen/Mips/global-address.ll
@@ -45,12 +45,12 @@ entry:
 ; STATIC-N64: daddiu $[[R3]], $[[R3]], %hi(g1)
 ; STATIC-N64: lw  ${{[0-9]+}}, %lo(g1)($[[R3]])
 
-  %0 = load i32, i32* @s1, align 4
+  %0 = load i32, ptr @s1, align 4
   tail call void @foo1(i32 %0) nounwind
-  %1 = load i32, i32* @g1, align 4
-  store i32 %1, i32* @s1, align 4
+  %1 = load i32, ptr @g1, align 4
+  store i32 %1, ptr @s1, align 4
   %add = add nsw i32 %1, 2
-  store i32 %add, i32* @g1, align 4
+  store i32 %add, ptr @g1, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/global-pointer-reg.ll b/llvm/test/CodeGen/Mips/global-pointer-reg.ll
index 1c0eb01b67c11..3cb2aaed47669 100644
--- a/llvm/test/CodeGen/Mips/global-pointer-reg.ll
+++ b/llvm/test/CodeGen/Mips/global-pointer-reg.ll
@@ -15,10 +15,10 @@ entry:
 ; CHECK: addu   $[[GP:[0-9]+]], $[[R1]], $25
 ; CHECK: lw     ${{[0-9]+}}, %call16(foo2)($[[GP]])
 
-  tail call void @foo2(i32* @g0) nounwind
-  tail call void @foo2(i32* @g1) nounwind
-  tail call void @foo2(i32* @g2) nounwind
+  tail call void @foo2(ptr @g0) nounwind
+  tail call void @foo2(ptr @g1) nounwind
+  tail call void @foo2(ptr @g2) nounwind
   ret void
 }
 
-declare void @foo2(i32*)
+declare void @foo2(ptr)

diff  --git a/llvm/test/CodeGen/Mips/gpopt-explict-section.ll b/llvm/test/CodeGen/Mips/gpopt-explict-section.ll
index 0b7582614feca..2f5d1a0ab90b5 100644
--- a/llvm/test/CodeGen/Mips/gpopt-explict-section.ll
+++ b/llvm/test/CodeGen/Mips/gpopt-explict-section.ll
@@ -17,7 +17,7 @@
 
 define i32 @g() {
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([2 x i32], [2 x i32]* @a, i32 0, i32 0), align 4
+  %0 = load i32, ptr @a, align 4
   ret i32 %0
 }
 
@@ -26,7 +26,7 @@ entry:
 
 define i32 @f() {
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i32 0), align 4
+  %0 = load i32, ptr @b, align 4
   ret i32 %0
 }
 
@@ -35,7 +35,7 @@ entry:
 
 define i32 @h() {
 entry:
-  %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i32 0), align 4
+  %0 = load i32, ptr @c, align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll b/llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll
index c55e9fa21c1e0..35cebdf302b5d 100644
--- a/llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll
+++ b/llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll
@@ -19,13 +19,13 @@ declare void @externalFunc()
 
 define internal fastcc void @internalFunc() nounwind noinline {
 entry:
-  %0 = load i32, i32* @g, align 4
+  %0 = load i32, ptr @g, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* @g, align 4
+  store i32 %inc, ptr @g, align 4
   ret void
 }
 
-define void @no_lazy(void (i32)* %pf) {
+define void @no_lazy(ptr %pf) {
 
 ; CHECK-LABEL:  no_lazy
 ; CHECK-NOT:    gp_disp

diff  --git a/llvm/test/CodeGen/Mips/gprestore.ll b/llvm/test/CodeGen/Mips/gprestore.ll
index a1e696b0ac08b..889685022264b 100644
--- a/llvm/test/CodeGen/Mips/gprestore.ll
+++ b/llvm/test/CodeGen/Mips/gprestore.ll
@@ -210,11 +210,11 @@ define void @f0() nounwind {
 ; O3N32-NEXT:    addiu $sp, $sp, 32
 entry:
   tail call void @f1() nounwind
-  %tmp = load i32, i32* @p, align 4
+  %tmp = load i32, ptr @p, align 4
   tail call void @f2(i32 %tmp) nounwind
-  %tmp1 = load i32, i32* @q, align 4
+  %tmp1 = load i32, ptr @q, align 4
   tail call void @f2(i32 %tmp1) nounwind
-  %tmp2 = load i32, i32* @r, align 4
+  %tmp2 = load i32, ptr @r, align 4
   tail call void @f3(i32 %tmp1, i32 %tmp2) nounwind
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/helloworld.ll b/llvm/test/CodeGen/Mips/helloworld.ll
index f715313354ea3..152f8533fc36a 100644
--- a/llvm/test/CodeGen/Mips/helloworld.ll
+++ b/llvm/test/CodeGen/Mips/helloworld.ll
@@ -12,7 +12,7 @@
 
 define i32 @main() nounwind {
 entry:
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str, i32 0, i32 0))
+  %call = call i32 (ptr, ...) @printf(ptr @.str)
   ret i32 0
 
 ; SR: 	.set	mips16
@@ -55,4 +55,4 @@ entry:
 ;  SR32:  .set reorder
 ; SR:   .end main
 ; SR32:   .end main
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/hf16_1.ll b/llvm/test/CodeGen/Mips/hf16_1.ll
index aea241e271953..327f04f648598 100644
--- a/llvm/test/CodeGen/Mips/hf16_1.ll
+++ b/llvm/test/CodeGen/Mips/hf16_1.ll
@@ -11,96 +11,96 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   call void @v_sf(float %0)
-  %1 = load double, double* @xd, align 8
+  %1 = load double, ptr @xd, align 8
   call void @v_df(double %1)
-  %2 = load float, float* @x, align 4
-  %3 = load float, float* @y, align 4
+  %2 = load float, ptr @x, align 4
+  %3 = load float, ptr @y, align 4
   call void @v_sf_sf(float %2, float %3)
-  %4 = load double, double* @xd, align 8
-  %5 = load float, float* @x, align 4
+  %4 = load double, ptr @xd, align 8
+  %5 = load float, ptr @x, align 4
   call void @v_df_sf(double %4, float %5)
-  %6 = load double, double* @xd, align 8
-  %7 = load double, double* @yd, align 8
+  %6 = load double, ptr @xd, align 8
+  %7 = load double, ptr @yd, align 8
   call void @v_df_df(double %6, double %7)
   %call = call float @sf_v()
-  %8 = load float, float* @x, align 4
+  %8 = load float, ptr @x, align 4
   %call1 = call float @sf_sf(float %8)
-  %9 = load double, double* @xd, align 8
+  %9 = load double, ptr @xd, align 8
   %call2 = call float @sf_df(double %9)
-  %10 = load float, float* @x, align 4
-  %11 = load float, float* @y, align 4
+  %10 = load float, ptr @x, align 4
+  %11 = load float, ptr @y, align 4
   %call3 = call float @sf_sf_sf(float %10, float %11)
-  %12 = load double, double* @xd, align 8
-  %13 = load float, float* @x, align 4
+  %12 = load double, ptr @xd, align 8
+  %13 = load float, ptr @x, align 4
   %call4 = call float @sf_df_sf(double %12, float %13)
-  %14 = load double, double* @xd, align 8
-  %15 = load double, double* @yd, align 8
+  %14 = load double, ptr @xd, align 8
+  %15 = load double, ptr @yd, align 8
   %call5 = call float @sf_df_df(double %14, double %15)
   %call6 = call double @df_v()
-  %16 = load float, float* @x, align 4
+  %16 = load float, ptr @x, align 4
   %call7 = call double @df_sf(float %16)
-  %17 = load double, double* @xd, align 8
+  %17 = load double, ptr @xd, align 8
   %call8 = call double @df_df(double %17)
-  %18 = load float, float* @x, align 4
-  %19 = load float, float* @y, align 4
+  %18 = load float, ptr @x, align 4
+  %19 = load float, ptr @y, align 4
   %call9 = call double @df_sf_sf(float %18, float %19)
-  %20 = load double, double* @xd, align 8
-  %21 = load float, float* @x, align 4
+  %20 = load double, ptr @xd, align 8
+  %21 = load float, ptr @x, align 4
   %call10 = call double @df_df_sf(double %20, float %21)
-  %22 = load double, double* @xd, align 8
-  %23 = load double, double* @yd, align 8
+  %22 = load double, ptr @xd, align 8
+  %23 = load double, ptr @yd, align 8
   %call11 = call double @df_df_df(double %22, double %23)
   %call12 = call { float, float } @sc_v()
   %24 = extractvalue { float, float } %call12, 0
   %25 = extractvalue { float, float } %call12, 1
-  %26 = load float, float* @x, align 4
+  %26 = load float, ptr @x, align 4
   %call13 = call { float, float } @sc_sf(float %26)
   %27 = extractvalue { float, float } %call13, 0
   %28 = extractvalue { float, float } %call13, 1
-  %29 = load double, double* @xd, align 8
+  %29 = load double, ptr @xd, align 8
   %call14 = call { float, float } @sc_df(double %29)
   %30 = extractvalue { float, float } %call14, 0
   %31 = extractvalue { float, float } %call14, 1
-  %32 = load float, float* @x, align 4
-  %33 = load float, float* @y, align 4
+  %32 = load float, ptr @x, align 4
+  %33 = load float, ptr @y, align 4
   %call15 = call { float, float } @sc_sf_sf(float %32, float %33)
   %34 = extractvalue { float, float } %call15, 0
   %35 = extractvalue { float, float } %call15, 1
-  %36 = load double, double* @xd, align 8
-  %37 = load float, float* @x, align 4
+  %36 = load double, ptr @xd, align 8
+  %37 = load float, ptr @x, align 4
   %call16 = call { float, float } @sc_df_sf(double %36, float %37)
   %38 = extractvalue { float, float } %call16, 0
   %39 = extractvalue { float, float } %call16, 1
-  %40 = load double, double* @xd, align 8
-  %41 = load double, double* @yd, align 8
+  %40 = load double, ptr @xd, align 8
+  %41 = load double, ptr @yd, align 8
   %call17 = call { float, float } @sc_df_df(double %40, double %41)
   %42 = extractvalue { float, float } %call17, 0
   %43 = extractvalue { float, float } %call17, 1
   %call18 = call { double, double } @dc_v()
   %44 = extractvalue { double, double } %call18, 0
   %45 = extractvalue { double, double } %call18, 1
-  %46 = load float, float* @x, align 4
+  %46 = load float, ptr @x, align 4
   %call19 = call { double, double } @dc_sf(float %46)
   %47 = extractvalue { double, double } %call19, 0
   %48 = extractvalue { double, double } %call19, 1
-  %49 = load double, double* @xd, align 8
+  %49 = load double, ptr @xd, align 8
   %call20 = call { double, double } @dc_df(double %49)
   %50 = extractvalue { double, double } %call20, 0
   %51 = extractvalue { double, double } %call20, 1
-  %52 = load float, float* @x, align 4
-  %53 = load float, float* @y, align 4
+  %52 = load float, ptr @x, align 4
+  %53 = load float, ptr @y, align 4
   %call21 = call { double, double } @dc_sf_sf(float %52, float %53)
   %54 = extractvalue { double, double } %call21, 0
   %55 = extractvalue { double, double } %call21, 1
-  %56 = load double, double* @xd, align 8
-  %57 = load float, float* @x, align 4
+  %56 = load double, ptr @xd, align 8
+  %57 = load float, ptr @x, align 4
   %call22 = call { double, double } @dc_df_sf(double %56, float %57)
   %58 = extractvalue { double, double } %call22, 0
   %59 = extractvalue { double, double } %call22, 1
-  %60 = load double, double* @xd, align 8
-  %61 = load double, double* @yd, align 8
+  %60 = load double, ptr @xd, align 8
+  %61 = load double, ptr @yd, align 8
   %call23 = call { double, double } @dc_df_df(double %60, double %61)
   %62 = extractvalue { double, double } %call23, 0
   %63 = extractvalue { double, double } %call23, 1

diff  --git a/llvm/test/CodeGen/Mips/hf16call32.ll b/llvm/test/CodeGen/Mips/hf16call32.ll
index 616b9dce9907b..d44224f57d107 100644
--- a/llvm/test/CodeGen/Mips/hf16call32.ll
+++ b/llvm/test/CodeGen/Mips/hf16call32.ll
@@ -29,34 +29,34 @@
 ; Function Attrs: nounwind
 define void @clear() #0 {
 entry:
-  store float 1.000000e+00, float* @x, align 4
-  store float 1.000000e+00, float* @y, align 4
-  store double 1.000000e+00, double* @xd, align 8
-  store double 1.000000e+00, double* @yd, align 8
-  store float 1.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
-  store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
-  store double 1.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
-  store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
-  store float 1.000000e+00, float* @ret_sf, align 4
-  store double 1.000000e+00, double* @ret_df, align 8
-  store float 1.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
-  store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
-  store double 1.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
-  store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
-  store float 0.000000e+00, float* @lx, align 4
-  store float 0.000000e+00, float* @ly, align 4
-  store double 0.000000e+00, double* @lxd, align 8
-  store double 0.000000e+00, double* @lyd, align 8
-  store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lxy, i32 0, i32 0)
-  store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lxy, i32 0, i32 1)
-  store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lxyd, i32 0, i32 0)
-  store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lxyd, i32 0, i32 1)
-  store float 0.000000e+00, float* @lret_sf, align 4
-  store double 0.000000e+00, double* @lret_df, align 8
-  store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
-  store float 0.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
-  store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
-  store double 0.000000e+00, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+  store float 1.000000e+00, ptr @x, align 4
+  store float 1.000000e+00, ptr @y, align 4
+  store double 1.000000e+00, ptr @xd, align 8
+  store double 1.000000e+00, ptr @yd, align 8
+  store float 1.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+  store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
+  store double 1.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+  store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
+  store float 1.000000e+00, ptr @ret_sf, align 4
+  store double 1.000000e+00, ptr @ret_df, align 8
+  store float 1.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
+  store double 1.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+  store float 0.000000e+00, ptr @lx, align 4
+  store float 0.000000e+00, ptr @ly, align 4
+  store double 0.000000e+00, ptr @lxd, align 8
+  store double 0.000000e+00, ptr @lyd, align 8
+  store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lxy, i32 0, i32 0)
+  store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lxy, i32 0, i32 1)
+  store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lxyd, i32 0, i32 0)
+  store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lxyd, i32 0, i32 1)
+  store float 0.000000e+00, ptr @lret_sf, align 4
+  store double 0.000000e+00, ptr @lret_df, align 8
+  store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  store float 0.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
+  store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  store double 0.000000e+00, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
   ret void
 }
 
@@ -64,689 +64,689 @@ entry:
 define i32 @main() #0 {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
+  store i32 0, ptr %retval
   call void @clear()
-  store float 1.500000e+00, float* @lx, align 4
-  %0 = load float, float* @lx, align 4
+  store float 1.500000e+00, ptr @lx, align 4
+  %0 = load float, ptr @lx, align 4
   call void @v_sf(float %0)
-  %1 = load float, float* @x, align 4
+  %1 = load float, ptr @x, align 4
   %conv = fpext float %1 to double
-  %2 = load float, float* @lx, align 4
+  %2 = load float, ptr @lx, align 4
   %conv1 = fpext float %2 to double
-  %3 = load float, float* @x, align 4
-  %4 = load float, float* @lx, align 4
+  %3 = load float, ptr @x, align 4
+  %4 = load float, ptr @lx, align 4
   %cmp = fcmp oeq float %3, %4
   %conv2 = zext i1 %cmp to i32
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %conv, double %conv1, i32 %conv2)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, double %conv, double %conv1, i32 %conv2)
   call void @clear()
-  store double 0x41678C29C0000000, double* @lxd, align 8
-  %5 = load double, double* @lxd, align 8
+  store double 0x41678C29C0000000, ptr @lxd, align 8
+  %5 = load double, ptr @lxd, align 8
   call void @v_df(double %5)
-  %6 = load double, double* @xd, align 8
-  %7 = load double, double* @lxd, align 8
-  %8 = load double, double* @xd, align 8
-  %9 = load double, double* @lxd, align 8
+  %6 = load double, ptr @xd, align 8
+  %7 = load double, ptr @lxd, align 8
+  %8 = load double, ptr @xd, align 8
+  %9 = load double, ptr @lxd, align 8
   %cmp3 = fcmp oeq double %8, %9
   %conv4 = zext i1 %cmp3 to i32
-  %call5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %6, double %7, i32 %conv4)
+  %call5 = call i32 (ptr, ...) @printf(ptr @.str, double %6, double %7, i32 %conv4)
   call void @clear()
-  store float 9.000000e+00, float* @lx, align 4
-  store float 1.000000e+01, float* @ly, align 4
-  %10 = load float, float* @lx, align 4
-  %11 = load float, float* @ly, align 4
+  store float 9.000000e+00, ptr @lx, align 4
+  store float 1.000000e+01, ptr @ly, align 4
+  %10 = load float, ptr @lx, align 4
+  %11 = load float, ptr @ly, align 4
   call void @v_sf_sf(float %10, float %11)
-  %12 = load float, float* @x, align 4
+  %12 = load float, ptr @x, align 4
   %conv6 = fpext float %12 to double
-  %13 = load float, float* @lx, align 4
+  %13 = load float, ptr @lx, align 4
   %conv7 = fpext float %13 to double
-  %14 = load float, float* @y, align 4
+  %14 = load float, ptr @y, align 4
   %conv8 = fpext float %14 to double
-  %15 = load float, float* @ly, align 4
+  %15 = load float, ptr @ly, align 4
   %conv9 = fpext float %15 to double
-  %16 = load float, float* @x, align 4
-  %17 = load float, float* @lx, align 4
+  %16 = load float, ptr @x, align 4
+  %17 = load float, ptr @lx, align 4
   %cmp10 = fcmp oeq float %16, %17
   br i1 %cmp10, label %land.rhs, label %land.end
 
 land.rhs:                                         ; preds = %entry
-  %18 = load float, float* @y, align 4
-  %19 = load float, float* @ly, align 4
+  %18 = load float, ptr @y, align 4
+  %19 = load float, ptr @ly, align 4
   %cmp12 = fcmp oeq float %18, %19
   br label %land.end
 
 land.end:                                         ; preds = %land.rhs, %entry
   %20 = phi i1 [ false, %entry ], [ %cmp12, %land.rhs ]
   %land.ext = zext i1 %20 to i32
-  %call14 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv6, double %conv7, double %conv8, double %conv9, i32 %land.ext)
+  %call14 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv6, double %conv7, double %conv8, double %conv9, i32 %land.ext)
   call void @clear()
-  store float 0x3FFE666660000000, float* @lx, align 4
-  store double 0x4007E613249FF279, double* @lyd, align 8
-  %21 = load float, float* @lx, align 4
-  %22 = load double, double* @lyd, align 8
+  store float 0x3FFE666660000000, ptr @lx, align 4
+  store double 0x4007E613249FF279, ptr @lyd, align 8
+  %21 = load float, ptr @lx, align 4
+  %22 = load double, ptr @lyd, align 8
   call void @v_sf_df(float %21, double %22)
-  %23 = load float, float* @x, align 4
+  %23 = load float, ptr @x, align 4
   %conv15 = fpext float %23 to double
-  %24 = load float, float* @lx, align 4
+  %24 = load float, ptr @lx, align 4
   %conv16 = fpext float %24 to double
-  %25 = load double, double* @yd, align 8
-  %26 = load double, double* @lyd, align 8
-  %27 = load float, float* @x, align 4
-  %28 = load float, float* @lx, align 4
+  %25 = load double, ptr @yd, align 8
+  %26 = load double, ptr @lyd, align 8
+  %27 = load float, ptr @x, align 4
+  %28 = load float, ptr @lx, align 4
   %cmp17 = fcmp oeq float %27, %28
   %conv18 = zext i1 %cmp17 to i32
-  %29 = load double, double* @yd, align 8
-  %30 = load double, double* @lyd, align 8
+  %29 = load double, ptr @yd, align 8
+  %30 = load double, ptr @lyd, align 8
   %cmp19 = fcmp oeq double %29, %30
   %conv20 = zext i1 %cmp19 to i32
   %and = and i32 %conv18, %conv20
-  %call21 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv15, double %conv16, double %25, double %26, i32 %and)
+  %call21 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv15, double %conv16, double %25, double %26, i32 %and)
   call void @clear()
-  store double 0x4194E54F94000000, double* @lxd, align 8
-  store float 7.600000e+01, float* @ly, align 4
-  %31 = load double, double* @lxd, align 8
-  %32 = load float, float* @ly, align 4
+  store double 0x4194E54F94000000, ptr @lxd, align 8
+  store float 7.600000e+01, ptr @ly, align 4
+  %31 = load double, ptr @lxd, align 8
+  %32 = load float, ptr @ly, align 4
   call void @v_df_sf(double %31, float %32)
-  %33 = load double, double* @xd, align 8
-  %34 = load double, double* @lxd, align 8
-  %35 = load float, float* @y, align 4
+  %33 = load double, ptr @xd, align 8
+  %34 = load double, ptr @lxd, align 8
+  %35 = load float, ptr @y, align 4
   %conv22 = fpext float %35 to double
-  %36 = load float, float* @ly, align 4
+  %36 = load float, ptr @ly, align 4
   %conv23 = fpext float %36 to double
-  %37 = load double, double* @xd, align 8
-  %38 = load double, double* @lxd, align 8
+  %37 = load double, ptr @xd, align 8
+  %38 = load double, ptr @lxd, align 8
   %cmp24 = fcmp oeq double %37, %38
   %conv25 = zext i1 %cmp24 to i32
-  %39 = load float, float* @y, align 4
-  %40 = load float, float* @ly, align 4
+  %39 = load float, ptr @y, align 4
+  %40 = load float, ptr @ly, align 4
   %cmp26 = fcmp oeq float %39, %40
   %conv27 = zext i1 %cmp26 to i32
   %and28 = and i32 %conv25, %conv27
-  %call29 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %33, double %34, double %conv22, double %conv23, i32 %and28)
+  %call29 = call i32 (ptr, ...) @printf(ptr @.str1, double %33, double %34, double %conv22, double %conv23, i32 %and28)
   call void @clear()
-  store double 7.365198e+07, double* @lxd, align 8
-  store double 0x416536CD80000000, double* @lyd, align 8
-  %41 = load double, double* @lxd, align 8
-  %42 = load double, double* @lyd, align 8
+  store double 7.365198e+07, ptr @lxd, align 8
+  store double 0x416536CD80000000, ptr @lyd, align 8
+  %41 = load double, ptr @lxd, align 8
+  %42 = load double, ptr @lyd, align 8
   call void @v_df_df(double %41, double %42)
-  %43 = load double, double* @xd, align 8
-  %44 = load double, double* @lxd, align 8
-  %45 = load double, double* @yd, align 8
-  %46 = load double, double* @lyd, align 8
-  %47 = load double, double* @xd, align 8
-  %48 = load double, double* @lxd, align 8
+  %43 = load double, ptr @xd, align 8
+  %44 = load double, ptr @lxd, align 8
+  %45 = load double, ptr @yd, align 8
+  %46 = load double, ptr @lyd, align 8
+  %47 = load double, ptr @xd, align 8
+  %48 = load double, ptr @lxd, align 8
   %cmp30 = fcmp oeq double %47, %48
   %conv31 = zext i1 %cmp30 to i32
-  %49 = load double, double* @yd, align 8
-  %50 = load double, double* @lyd, align 8
+  %49 = load double, ptr @yd, align 8
+  %50 = load double, ptr @lyd, align 8
   %cmp32 = fcmp oeq double %49, %50
   %conv33 = zext i1 %cmp32 to i32
   %and34 = and i32 %conv31, %conv33
-  %call35 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %43, double %44, double %45, double %46, i32 %and34)
+  %call35 = call i32 (ptr, ...) @printf(ptr @.str1, double %43, double %44, double %45, double %46, i32 %and34)
   call void @clear()
-  store float 0x4016666660000000, float* @ret_sf, align 4
+  store float 0x4016666660000000, ptr @ret_sf, align 4
   %call36 = call float @sf_v()
-  store float %call36, float* @lret_sf, align 4
-  %51 = load float, float* @ret_sf, align 4
+  store float %call36, ptr @lret_sf, align 4
+  %51 = load float, ptr @ret_sf, align 4
   %conv37 = fpext float %51 to double
-  %52 = load float, float* @lret_sf, align 4
+  %52 = load float, ptr @lret_sf, align 4
   %conv38 = fpext float %52 to double
-  %53 = load float, float* @ret_sf, align 4
-  %54 = load float, float* @lret_sf, align 4
+  %53 = load float, ptr @ret_sf, align 4
+  %54 = load float, ptr @lret_sf, align 4
   %cmp39 = fcmp oeq float %53, %54
   %conv40 = zext i1 %cmp39 to i32
-  %call41 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %conv37, double %conv38, i32 %conv40)
+  %call41 = call i32 (ptr, ...) @printf(ptr @.str, double %conv37, double %conv38, i32 %conv40)
   call void @clear()
-  store float 4.587300e+06, float* @ret_sf, align 4
-  store float 3.420000e+02, float* @lx, align 4
-  %55 = load float, float* @lx, align 4
+  store float 4.587300e+06, ptr @ret_sf, align 4
+  store float 3.420000e+02, ptr @lx, align 4
+  %55 = load float, ptr @lx, align 4
   %call42 = call float @sf_sf(float %55)
-  store float %call42, float* @lret_sf, align 4
-  %56 = load float, float* @ret_sf, align 4
+  store float %call42, ptr @lret_sf, align 4
+  %56 = load float, ptr @ret_sf, align 4
   %conv43 = fpext float %56 to double
-  %57 = load float, float* @lret_sf, align 4
+  %57 = load float, ptr @lret_sf, align 4
   %conv44 = fpext float %57 to double
-  %58 = load float, float* @x, align 4
+  %58 = load float, ptr @x, align 4
   %conv45 = fpext float %58 to double
-  %59 = load float, float* @lx, align 4
+  %59 = load float, ptr @lx, align 4
   %conv46 = fpext float %59 to double
-  %60 = load float, float* @ret_sf, align 4
-  %61 = load float, float* @lret_sf, align 4
+  %60 = load float, ptr @ret_sf, align 4
+  %61 = load float, ptr @lret_sf, align 4
   %cmp47 = fcmp oeq float %60, %61
   %conv48 = zext i1 %cmp47 to i32
-  %62 = load float, float* @x, align 4
-  %63 = load float, float* @lx, align 4
+  %62 = load float, ptr @x, align 4
+  %63 = load float, ptr @lx, align 4
   %cmp49 = fcmp oeq float %62, %63
   %conv50 = zext i1 %cmp49 to i32
   %and51 = and i32 %conv48, %conv50
-  %call52 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv43, double %conv44, double %conv45, double %conv46, i32 %and51)
+  %call52 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv43, double %conv44, double %conv45, double %conv46, i32 %and51)
   call void @clear()
-  store float 4.445910e+06, float* @ret_sf, align 4
-  store double 0x419A7DB294000000, double* @lxd, align 8
-  %64 = load double, double* @lxd, align 8
+  store float 4.445910e+06, ptr @ret_sf, align 4
+  store double 0x419A7DB294000000, ptr @lxd, align 8
+  %64 = load double, ptr @lxd, align 8
   %call53 = call float @sf_df(double %64)
-  store float %call53, float* @lret_sf, align 4
-  %65 = load float, float* @ret_sf, align 4
+  store float %call53, ptr @lret_sf, align 4
+  %65 = load float, ptr @ret_sf, align 4
   %conv54 = fpext float %65 to double
-  %66 = load float, float* @lret_sf, align 4
+  %66 = load float, ptr @lret_sf, align 4
   %conv55 = fpext float %66 to double
-  %67 = load double, double* @xd, align 8
-  %68 = load double, double* @lxd, align 8
-  %69 = load float, float* @ret_sf, align 4
-  %70 = load float, float* @lret_sf, align 4
+  %67 = load double, ptr @xd, align 8
+  %68 = load double, ptr @lxd, align 8
+  %69 = load float, ptr @ret_sf, align 4
+  %70 = load float, ptr @lret_sf, align 4
   %cmp56 = fcmp oeq float %69, %70
   %conv57 = zext i1 %cmp56 to i32
-  %71 = load double, double* @xd, align 8
-  %72 = load double, double* @lxd, align 8
+  %71 = load double, ptr @xd, align 8
+  %72 = load double, ptr @lxd, align 8
   %cmp58 = fcmp oeq double %71, %72
   %conv59 = zext i1 %cmp58 to i32
   %and60 = and i32 %conv57, %conv59
-  %call61 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %conv54, double %conv55, double %67, double %68, i32 %and60)
+  %call61 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv54, double %conv55, double %67, double %68, i32 %and60)
   call void @clear()
-  store float 0x3FFF4BC6A0000000, float* @ret_sf, align 4
-  store float 4.445500e+03, float* @lx, align 4
-  store float 0x4068ACCCC0000000, float* @ly, align 4
-  %73 = load float, float* @lx, align 4
-  %74 = load float, float* @ly, align 4
+  store float 0x3FFF4BC6A0000000, ptr @ret_sf, align 4
+  store float 4.445500e+03, ptr @lx, align 4
+  store float 0x4068ACCCC0000000, ptr @ly, align 4
+  %73 = load float, ptr @lx, align 4
+  %74 = load float, ptr @ly, align 4
   %call62 = call float @sf_sf_sf(float %73, float %74)
-  store float %call62, float* @lret_sf, align 4
-  %75 = load float, float* @ret_sf, align 4
+  store float %call62, ptr @lret_sf, align 4
+  %75 = load float, ptr @ret_sf, align 4
   %conv63 = fpext float %75 to double
-  %76 = load float, float* @lret_sf, align 4
+  %76 = load float, ptr @lret_sf, align 4
   %conv64 = fpext float %76 to double
-  %77 = load float, float* @x, align 4
+  %77 = load float, ptr @x, align 4
   %conv65 = fpext float %77 to double
-  %78 = load float, float* @lx, align 4
+  %78 = load float, ptr @lx, align 4
   %conv66 = fpext float %78 to double
-  %79 = load float, float* @y, align 4
+  %79 = load float, ptr @y, align 4
   %conv67 = fpext float %79 to double
-  %80 = load float, float* @ly, align 4
+  %80 = load float, ptr @ly, align 4
   %conv68 = fpext float %80 to double
-  %81 = load float, float* @ret_sf, align 4
-  %82 = load float, float* @lret_sf, align 4
+  %81 = load float, ptr @ret_sf, align 4
+  %82 = load float, ptr @lret_sf, align 4
   %cmp69 = fcmp oeq float %81, %82
   br i1 %cmp69, label %land.lhs.true, label %land.end76
 
 land.lhs.true:                                    ; preds = %land.end
-  %83 = load float, float* @x, align 4
-  %84 = load float, float* @lx, align 4
+  %83 = load float, ptr @x, align 4
+  %84 = load float, ptr @lx, align 4
   %cmp71 = fcmp oeq float %83, %84
   br i1 %cmp71, label %land.rhs73, label %land.end76
 
 land.rhs73:                                       ; preds = %land.lhs.true
-  %85 = load float, float* @y, align 4
-  %86 = load float, float* @ly, align 4
+  %85 = load float, ptr @y, align 4
+  %86 = load float, ptr @ly, align 4
   %cmp74 = fcmp oeq float %85, %86
   br label %land.end76
 
 land.end76:                                       ; preds = %land.rhs73, %land.lhs.true, %land.end
   %87 = phi i1 [ false, %land.lhs.true ], [ false, %land.end ], [ %cmp74, %land.rhs73 ]
   %land.ext77 = zext i1 %87 to i32
-  %call78 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv63, double %conv64, double %conv65, double %conv66, double %conv67, double %conv68, i32 %land.ext77)
+  %call78 = call i32 (ptr, ...) @printf(ptr @.str2, double %conv63, double %conv64, double %conv65, double %conv66, double %conv67, double %conv68, i32 %land.ext77)
   call void @clear()
-  store float 9.991300e+04, float* @ret_sf, align 4
-  store float 1.114500e+04, float* @lx, align 4
-  store double 9.994445e+07, double* @lyd, align 8
-  %88 = load float, float* @lx, align 4
-  %89 = load double, double* @lyd, align 8
+  store float 9.991300e+04, ptr @ret_sf, align 4
+  store float 1.114500e+04, ptr @lx, align 4
+  store double 9.994445e+07, ptr @lyd, align 8
+  %88 = load float, ptr @lx, align 4
+  %89 = load double, ptr @lyd, align 8
   %call79 = call float @sf_sf_df(float %88, double %89)
-  store float %call79, float* @lret_sf, align 4
-  %90 = load float, float* @ret_sf, align 4
+  store float %call79, ptr @lret_sf, align 4
+  %90 = load float, ptr @ret_sf, align 4
   %conv80 = fpext float %90 to double
-  %91 = load float, float* @lret_sf, align 4
+  %91 = load float, ptr @lret_sf, align 4
   %conv81 = fpext float %91 to double
-  %92 = load float, float* @x, align 4
+  %92 = load float, ptr @x, align 4
   %conv82 = fpext float %92 to double
-  %93 = load float, float* @lx, align 4
+  %93 = load float, ptr @lx, align 4
   %conv83 = fpext float %93 to double
-  %94 = load double, double* @yd, align 8
-  %95 = load double, double* @lyd, align 8
-  %96 = load float, float* @ret_sf, align 4
-  %97 = load float, float* @lret_sf, align 4
+  %94 = load double, ptr @yd, align 8
+  %95 = load double, ptr @lyd, align 8
+  %96 = load float, ptr @ret_sf, align 4
+  %97 = load float, ptr @lret_sf, align 4
   %cmp84 = fcmp oeq float %96, %97
   br i1 %cmp84, label %land.lhs.true86, label %land.end92
 
 land.lhs.true86:                                  ; preds = %land.end76
-  %98 = load float, float* @x, align 4
-  %99 = load float, float* @lx, align 4
+  %98 = load float, ptr @x, align 4
+  %99 = load float, ptr @lx, align 4
   %cmp87 = fcmp oeq float %98, %99
   br i1 %cmp87, label %land.rhs89, label %land.end92
 
 land.rhs89:                                       ; preds = %land.lhs.true86
-  %100 = load double, double* @yd, align 8
-  %101 = load double, double* @lyd, align 8
+  %100 = load double, ptr @yd, align 8
+  %101 = load double, ptr @lyd, align 8
   %cmp90 = fcmp oeq double %100, %101
   br label %land.end92
 
 land.end92:                                       ; preds = %land.rhs89, %land.lhs.true86, %land.end76
   %102 = phi i1 [ false, %land.lhs.true86 ], [ false, %land.end76 ], [ %cmp90, %land.rhs89 ]
   %land.ext93 = zext i1 %102 to i32
-  %call94 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv80, double %conv81, double %conv82, double %conv83, double %94, double %95, i32 %land.ext93)
+  %call94 = call i32 (ptr, ...) @printf(ptr @.str2, double %conv80, double %conv81, double %conv82, double %conv83, double %94, double %95, i32 %land.ext93)
   call void @clear()
-  store float 0x417CCC7A00000000, float* @ret_sf, align 4
-  store double 0x4172034530000000, double* @lxd, align 8
-  store float 4.456200e+04, float* @ly, align 4
-  %103 = load double, double* @lxd, align 8
-  %104 = load float, float* @ly, align 4
+  store float 0x417CCC7A00000000, ptr @ret_sf, align 4
+  store double 0x4172034530000000, ptr @lxd, align 8
+  store float 4.456200e+04, ptr @ly, align 4
+  %103 = load double, ptr @lxd, align 8
+  %104 = load float, ptr @ly, align 4
   %call95 = call float @sf_df_sf(double %103, float %104)
-  store float %call95, float* @lret_sf, align 4
-  %105 = load float, float* @ret_sf, align 4
+  store float %call95, ptr @lret_sf, align 4
+  %105 = load float, ptr @ret_sf, align 4
   %conv96 = fpext float %105 to double
-  %106 = load float, float* @lret_sf, align 4
+  %106 = load float, ptr @lret_sf, align 4
   %conv97 = fpext float %106 to double
-  %107 = load double, double* @xd, align 8
-  %108 = load double, double* @lxd, align 8
-  %109 = load float, float* @y, align 4
+  %107 = load double, ptr @xd, align 8
+  %108 = load double, ptr @lxd, align 8
+  %109 = load float, ptr @y, align 4
   %conv98 = fpext float %109 to double
-  %110 = load float, float* @ly, align 4
+  %110 = load float, ptr @ly, align 4
   %conv99 = fpext float %110 to double
-  %111 = load float, float* @ret_sf, align 4
-  %112 = load float, float* @lret_sf, align 4
+  %111 = load float, ptr @ret_sf, align 4
+  %112 = load float, ptr @lret_sf, align 4
   %cmp100 = fcmp oeq float %111, %112
   br i1 %cmp100, label %land.lhs.true102, label %land.end108
 
 land.lhs.true102:                                 ; preds = %land.end92
-  %113 = load double, double* @xd, align 8
-  %114 = load double, double* @lxd, align 8
+  %113 = load double, ptr @xd, align 8
+  %114 = load double, ptr @lxd, align 8
   %cmp103 = fcmp oeq double %113, %114
   br i1 %cmp103, label %land.rhs105, label %land.end108
 
 land.rhs105:                                      ; preds = %land.lhs.true102
-  %115 = load float, float* @y, align 4
-  %116 = load float, float* @ly, align 4
+  %115 = load float, ptr @y, align 4
+  %116 = load float, ptr @ly, align 4
   %cmp106 = fcmp oeq float %115, %116
   br label %land.end108
 
 land.end108:                                      ; preds = %land.rhs105, %land.lhs.true102, %land.end92
   %117 = phi i1 [ false, %land.lhs.true102 ], [ false, %land.end92 ], [ %cmp106, %land.rhs105 ]
   %land.ext109 = zext i1 %117 to i32
-  %call110 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv96, double %conv97, double %107, double %108, double %conv98, double %conv99, i32 %land.ext109)
+  %call110 = call i32 (ptr, ...) @printf(ptr @.str2, double %conv96, double %conv97, double %107, double %108, double %conv98, double %conv99, i32 %land.ext109)
   call void @clear()
-  store float 3.987721e+06, float* @ret_sf, align 4
-  store double 0x3FF1F49F6DDDC2D8, double* @lxd, align 8
-  store double 0x409129F306A2B170, double* @lyd, align 8
-  %118 = load double, double* @lxd, align 8
-  %119 = load double, double* @lyd, align 8
+  store float 3.987721e+06, ptr @ret_sf, align 4
+  store double 0x3FF1F49F6DDDC2D8, ptr @lxd, align 8
+  store double 0x409129F306A2B170, ptr @lyd, align 8
+  %118 = load double, ptr @lxd, align 8
+  %119 = load double, ptr @lyd, align 8
   %call111 = call float @sf_df_df(double %118, double %119)
-  store float %call111, float* @lret_sf, align 4
-  %120 = load float, float* @ret_sf, align 4
+  store float %call111, ptr @lret_sf, align 4
+  %120 = load float, ptr @ret_sf, align 4
   %conv112 = fpext float %120 to double
-  %121 = load float, float* @lret_sf, align 4
+  %121 = load float, ptr @lret_sf, align 4
   %conv113 = fpext float %121 to double
-  %122 = load double, double* @xd, align 8
-  %123 = load double, double* @lxd, align 8
-  %124 = load double, double* @yd, align 8
-  %125 = load double, double* @lyd, align 8
-  %126 = load float, float* @ret_sf, align 4
-  %127 = load float, float* @lret_sf, align 4
+  %122 = load double, ptr @xd, align 8
+  %123 = load double, ptr @lxd, align 8
+  %124 = load double, ptr @yd, align 8
+  %125 = load double, ptr @lyd, align 8
+  %126 = load float, ptr @ret_sf, align 4
+  %127 = load float, ptr @lret_sf, align 4
   %cmp114 = fcmp oeq float %126, %127
   br i1 %cmp114, label %land.lhs.true116, label %land.end122
 
 land.lhs.true116:                                 ; preds = %land.end108
-  %128 = load double, double* @xd, align 8
-  %129 = load double, double* @lxd, align 8
+  %128 = load double, ptr @xd, align 8
+  %129 = load double, ptr @lxd, align 8
   %cmp117 = fcmp oeq double %128, %129
   br i1 %cmp117, label %land.rhs119, label %land.end122
 
 land.rhs119:                                      ; preds = %land.lhs.true116
-  %130 = load double, double* @yd, align 8
-  %131 = load double, double* @lyd, align 8
+  %130 = load double, ptr @yd, align 8
+  %131 = load double, ptr @lyd, align 8
   %cmp120 = fcmp oeq double %130, %131
   br label %land.end122
 
 land.end122:                                      ; preds = %land.rhs119, %land.lhs.true116, %land.end108
   %132 = phi i1 [ false, %land.lhs.true116 ], [ false, %land.end108 ], [ %cmp120, %land.rhs119 ]
   %land.ext123 = zext i1 %132 to i32
-  %call124 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %conv112, double %conv113, double %122, double %123, double %124, double %125, i32 %land.ext123)
+  %call124 = call i32 (ptr, ...) @printf(ptr @.str2, double %conv112, double %conv113, double %122, double %123, double %124, double %125, i32 %land.ext123)
   call void @clear()
-  store double 1.561234e+01, double* @ret_df, align 8
+  store double 1.561234e+01, ptr @ret_df, align 8
   %call125 = call double @df_v()
-  store double %call125, double* @lret_df, align 8
-  %133 = load double, double* @ret_df, align 8
-  %134 = load double, double* @lret_df, align 8
-  %135 = load double, double* @ret_df, align 8
-  %136 = load double, double* @lret_df, align 8
+  store double %call125, ptr @lret_df, align 8
+  %133 = load double, ptr @ret_df, align 8
+  %134 = load double, ptr @lret_df, align 8
+  %135 = load double, ptr @ret_df, align 8
+  %136 = load double, ptr @lret_df, align 8
   %cmp126 = fcmp oeq double %135, %136
   %conv127 = zext i1 %cmp126 to i32
-  %call128 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str, i32 0, i32 0), double %133, double %134, i32 %conv127)
+  %call128 = call i32 (ptr, ...) @printf(ptr @.str, double %133, double %134, i32 %conv127)
   call void @clear()
-  store double 1.345873e+01, double* @ret_df, align 8
-  store float 3.434520e+05, float* @lx, align 4
-  %137 = load float, float* @lx, align 4
+  store double 1.345873e+01, ptr @ret_df, align 8
+  store float 3.434520e+05, ptr @lx, align 4
+  %137 = load float, ptr @lx, align 4
   %call129 = call double @df_sf(float %137)
-  store double %call129, double* @lret_df, align 8
-  %138 = load double, double* @ret_df, align 8
-  %139 = load double, double* @lret_df, align 8
-  %140 = load float, float* @x, align 4
+  store double %call129, ptr @lret_df, align 8
+  %138 = load double, ptr @ret_df, align 8
+  %139 = load double, ptr @lret_df, align 8
+  %140 = load float, ptr @x, align 4
   %conv130 = fpext float %140 to double
-  %141 = load float, float* @lx, align 4
+  %141 = load float, ptr @lx, align 4
   %conv131 = fpext float %141 to double
-  %142 = load double, double* @ret_df, align 8
-  %143 = load double, double* @lret_df, align 8
+  %142 = load double, ptr @ret_df, align 8
+  %143 = load double, ptr @lret_df, align 8
   %cmp132 = fcmp oeq double %142, %143
   %conv133 = zext i1 %cmp132 to i32
-  %144 = load float, float* @x, align 4
-  %145 = load float, float* @lx, align 4
+  %144 = load float, ptr @x, align 4
+  %145 = load float, ptr @lx, align 4
   %cmp134 = fcmp oeq float %144, %145
   %conv135 = zext i1 %cmp134 to i32
   %and136 = and i32 %conv133, %conv135
-  %call137 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %138, double %139, double %conv130, double %conv131, i32 %and136)
+  %call137 = call i32 (ptr, ...) @printf(ptr @.str1, double %138, double %139, double %conv130, double %conv131, i32 %and136)
   call void @clear()
-  store double 0x4084F3AB7AA25D8D, double* @ret_df, align 8
-  store double 0x4114F671D2F1A9FC, double* @lxd, align 8
-  %146 = load double, double* @lxd, align 8
+  store double 0x4084F3AB7AA25D8D, ptr @ret_df, align 8
+  store double 0x4114F671D2F1A9FC, ptr @lxd, align 8
+  %146 = load double, ptr @lxd, align 8
   %call138 = call double @df_df(double %146)
-  store double %call138, double* @lret_df, align 8
-  %147 = load double, double* @ret_df, align 8
-  %148 = load double, double* @lret_df, align 8
-  %149 = load double, double* @xd, align 8
-  %150 = load double, double* @lxd, align 8
-  %151 = load double, double* @ret_df, align 8
-  %152 = load double, double* @lret_df, align 8
+  store double %call138, ptr @lret_df, align 8
+  %147 = load double, ptr @ret_df, align 8
+  %148 = load double, ptr @lret_df, align 8
+  %149 = load double, ptr @xd, align 8
+  %150 = load double, ptr @lxd, align 8
+  %151 = load double, ptr @ret_df, align 8
+  %152 = load double, ptr @lret_df, align 8
   %cmp139 = fcmp oeq double %151, %152
   %conv140 = zext i1 %cmp139 to i32
-  %153 = load double, double* @xd, align 8
-  %154 = load double, double* @lxd, align 8
+  %153 = load double, ptr @xd, align 8
+  %154 = load double, ptr @lxd, align 8
   %cmp141 = fcmp oeq double %153, %154
   %conv142 = zext i1 %cmp141 to i32
   %and143 = and i32 %conv140, %conv142
-  %call144 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str1, i32 0, i32 0), double %147, double %148, double %149, double %150, i32 %and143)
+  %call144 = call i32 (ptr, ...) @printf(ptr @.str1, double %147, double %148, double %149, double %150, i32 %and143)
   call void @clear()
-  store double 6.781956e+03, double* @ret_df, align 8
-  store float 4.445500e+03, float* @lx, align 4
-  store float 0x4068ACCCC0000000, float* @ly, align 4
-  %155 = load float, float* @lx, align 4
-  %156 = load float, float* @ly, align 4
+  store double 6.781956e+03, ptr @ret_df, align 8
+  store float 4.445500e+03, ptr @lx, align 4
+  store float 0x4068ACCCC0000000, ptr @ly, align 4
+  %155 = load float, ptr @lx, align 4
+  %156 = load float, ptr @ly, align 4
   %call145 = call double @df_sf_sf(float %155, float %156)
-  store double %call145, double* @lret_df, align 8
-  %157 = load double, double* @ret_df, align 8
-  %158 = load double, double* @lret_df, align 8
-  %159 = load float, float* @x, align 4
+  store double %call145, ptr @lret_df, align 8
+  %157 = load double, ptr @ret_df, align 8
+  %158 = load double, ptr @lret_df, align 8
+  %159 = load float, ptr @x, align 4
   %conv146 = fpext float %159 to double
-  %160 = load float, float* @lx, align 4
+  %160 = load float, ptr @lx, align 4
   %conv147 = fpext float %160 to double
-  %161 = load float, float* @y, align 4
+  %161 = load float, ptr @y, align 4
   %conv148 = fpext float %161 to double
-  %162 = load float, float* @ly, align 4
+  %162 = load float, ptr @ly, align 4
   %conv149 = fpext float %162 to double
-  %163 = load double, double* @ret_df, align 8
-  %164 = load double, double* @lret_df, align 8
+  %163 = load double, ptr @ret_df, align 8
+  %164 = load double, ptr @lret_df, align 8
   %cmp150 = fcmp oeq double %163, %164
   br i1 %cmp150, label %land.lhs.true152, label %land.end158
 
 land.lhs.true152:                                 ; preds = %land.end122
-  %165 = load float, float* @x, align 4
-  %166 = load float, float* @lx, align 4
+  %165 = load float, ptr @x, align 4
+  %166 = load float, ptr @lx, align 4
   %cmp153 = fcmp oeq float %165, %166
   br i1 %cmp153, label %land.rhs155, label %land.end158
 
 land.rhs155:                                      ; preds = %land.lhs.true152
-  %167 = load float, float* @y, align 4
-  %168 = load float, float* @ly, align 4
+  %167 = load float, ptr @y, align 4
+  %168 = load float, ptr @ly, align 4
   %cmp156 = fcmp oeq float %167, %168
   br label %land.end158
 
 land.end158:                                      ; preds = %land.rhs155, %land.lhs.true152, %land.end122
   %169 = phi i1 [ false, %land.lhs.true152 ], [ false, %land.end122 ], [ %cmp156, %land.rhs155 ]
   %land.ext159 = zext i1 %169 to i32
-  %call160 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %157, double %158, double %conv146, double %conv147, double %conv148, double %conv149, i32 %land.ext159)
+  %call160 = call i32 (ptr, ...) @printf(ptr @.str2, double %157, double %158, double %conv146, double %conv147, double %conv148, double %conv149, i32 %land.ext159)
   call void @clear()
-  store double 1.889130e+05, double* @ret_df, align 8
-  store float 9.111450e+05, float* @lx, align 4
-  store double 0x4185320A58000000, double* @lyd, align 8
-  %170 = load float, float* @lx, align 4
-  %171 = load double, double* @lyd, align 8
+  store double 1.889130e+05, ptr @ret_df, align 8
+  store float 9.111450e+05, ptr @lx, align 4
+  store double 0x4185320A58000000, ptr @lyd, align 8
+  %170 = load float, ptr @lx, align 4
+  %171 = load double, ptr @lyd, align 8
   %call161 = call double @df_sf_df(float %170, double %171)
-  store double %call161, double* @lret_df, align 8
-  %172 = load double, double* @ret_df, align 8
-  %173 = load double, double* @lret_df, align 8
-  %174 = load float, float* @x, align 4
+  store double %call161, ptr @lret_df, align 8
+  %172 = load double, ptr @ret_df, align 8
+  %173 = load double, ptr @lret_df, align 8
+  %174 = load float, ptr @x, align 4
   %conv162 = fpext float %174 to double
-  %175 = load float, float* @lx, align 4
+  %175 = load float, ptr @lx, align 4
   %conv163 = fpext float %175 to double
-  %176 = load double, double* @yd, align 8
-  %177 = load double, double* @lyd, align 8
-  %178 = load double, double* @ret_df, align 8
-  %179 = load double, double* @lret_df, align 8
+  %176 = load double, ptr @yd, align 8
+  %177 = load double, ptr @lyd, align 8
+  %178 = load double, ptr @ret_df, align 8
+  %179 = load double, ptr @lret_df, align 8
   %cmp164 = fcmp oeq double %178, %179
   br i1 %cmp164, label %land.lhs.true166, label %land.end172
 
 land.lhs.true166:                                 ; preds = %land.end158
-  %180 = load float, float* @x, align 4
-  %181 = load float, float* @lx, align 4
+  %180 = load float, ptr @x, align 4
+  %181 = load float, ptr @lx, align 4
   %cmp167 = fcmp oeq float %180, %181
   br i1 %cmp167, label %land.rhs169, label %land.end172
 
 land.rhs169:                                      ; preds = %land.lhs.true166
-  %182 = load double, double* @yd, align 8
-  %183 = load double, double* @lyd, align 8
+  %182 = load double, ptr @yd, align 8
+  %183 = load double, ptr @lyd, align 8
   %cmp170 = fcmp oeq double %182, %183
   br label %land.end172
 
 land.end172:                                      ; preds = %land.rhs169, %land.lhs.true166, %land.end158
   %184 = phi i1 [ false, %land.lhs.true166 ], [ false, %land.end158 ], [ %cmp170, %land.rhs169 ]
   %land.ext173 = zext i1 %184 to i32
-  %call174 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %172, double %173, double %conv162, double %conv163, double %176, double %177, i32 %land.ext173)
+  %call174 = call i32 (ptr, ...) @printf(ptr @.str2, double %172, double %173, double %conv162, double %conv163, double %176, double %177, i32 %land.ext173)
   call void @clear()
-  store double 0x418B2DB900000000, double* @ret_df, align 8
-  store double 0x41B1EF2ED3000000, double* @lxd, align 8
-  store float 1.244562e+06, float* @ly, align 4
-  %185 = load double, double* @lxd, align 8
-  %186 = load float, float* @ly, align 4
+  store double 0x418B2DB900000000, ptr @ret_df, align 8
+  store double 0x41B1EF2ED3000000, ptr @lxd, align 8
+  store float 1.244562e+06, ptr @ly, align 4
+  %185 = load double, ptr @lxd, align 8
+  %186 = load float, ptr @ly, align 4
   %call175 = call double @df_df_sf(double %185, float %186)
-  store double %call175, double* @lret_df, align 8
-  %187 = load double, double* @ret_df, align 8
-  %188 = load double, double* @lret_df, align 8
-  %189 = load double, double* @xd, align 8
-  %190 = load double, double* @lxd, align 8
-  %191 = load float, float* @y, align 4
+  store double %call175, ptr @lret_df, align 8
+  %187 = load double, ptr @ret_df, align 8
+  %188 = load double, ptr @lret_df, align 8
+  %189 = load double, ptr @xd, align 8
+  %190 = load double, ptr @lxd, align 8
+  %191 = load float, ptr @y, align 4
   %conv176 = fpext float %191 to double
-  %192 = load float, float* @ly, align 4
+  %192 = load float, ptr @ly, align 4
   %conv177 = fpext float %192 to double
-  %193 = load double, double* @ret_df, align 8
-  %194 = load double, double* @lret_df, align 8
+  %193 = load double, ptr @ret_df, align 8
+  %194 = load double, ptr @lret_df, align 8
   %cmp178 = fcmp oeq double %193, %194
   br i1 %cmp178, label %land.lhs.true180, label %land.end186
 
 land.lhs.true180:                                 ; preds = %land.end172
-  %195 = load double, double* @xd, align 8
-  %196 = load double, double* @lxd, align 8
+  %195 = load double, ptr @xd, align 8
+  %196 = load double, ptr @lxd, align 8
   %cmp181 = fcmp oeq double %195, %196
   br i1 %cmp181, label %land.rhs183, label %land.end186
 
 land.rhs183:                                      ; preds = %land.lhs.true180
-  %197 = load float, float* @y, align 4
-  %198 = load float, float* @ly, align 4
+  %197 = load float, ptr @y, align 4
+  %198 = load float, ptr @ly, align 4
   %cmp184 = fcmp oeq float %197, %198
   br label %land.end186
 
 land.end186:                                      ; preds = %land.rhs183, %land.lhs.true180, %land.end172
   %199 = phi i1 [ false, %land.lhs.true180 ], [ false, %land.end172 ], [ %cmp184, %land.rhs183 ]
   %land.ext187 = zext i1 %199 to i32
-  %call188 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %187, double %188, double %189, double %190, double %conv176, double %conv177, i32 %land.ext187)
+  %call188 = call i32 (ptr, ...) @printf(ptr @.str2, double %187, double %188, double %189, double %190, double %conv176, double %conv177, i32 %land.ext187)
   call void @clear()
-  store double 3.987721e+06, double* @ret_df, align 8
-  store double 5.223560e+00, double* @lxd, align 8
-  store double 0x40B7D37CC1A8AC5C, double* @lyd, align 8
-  %200 = load double, double* @lxd, align 8
-  %201 = load double, double* @lyd, align 8
+  store double 3.987721e+06, ptr @ret_df, align 8
+  store double 5.223560e+00, ptr @lxd, align 8
+  store double 0x40B7D37CC1A8AC5C, ptr @lyd, align 8
+  %200 = load double, ptr @lxd, align 8
+  %201 = load double, ptr @lyd, align 8
   %call189 = call double @df_df_df(double %200, double %201)
-  store double %call189, double* @lret_df, align 8
-  %202 = load double, double* @ret_df, align 8
-  %203 = load double, double* @lret_df, align 8
-  %204 = load double, double* @xd, align 8
-  %205 = load double, double* @lxd, align 8
-  %206 = load double, double* @yd, align 8
-  %207 = load double, double* @lyd, align 8
-  %208 = load double, double* @ret_df, align 8
-  %209 = load double, double* @lret_df, align 8
+  store double %call189, ptr @lret_df, align 8
+  %202 = load double, ptr @ret_df, align 8
+  %203 = load double, ptr @lret_df, align 8
+  %204 = load double, ptr @xd, align 8
+  %205 = load double, ptr @lxd, align 8
+  %206 = load double, ptr @yd, align 8
+  %207 = load double, ptr @lyd, align 8
+  %208 = load double, ptr @ret_df, align 8
+  %209 = load double, ptr @lret_df, align 8
   %cmp190 = fcmp oeq double %208, %209
   br i1 %cmp190, label %land.lhs.true192, label %land.end198
 
 land.lhs.true192:                                 ; preds = %land.end186
-  %210 = load double, double* @xd, align 8
-  %211 = load double, double* @lxd, align 8
+  %210 = load double, ptr @xd, align 8
+  %211 = load double, ptr @lxd, align 8
   %cmp193 = fcmp oeq double %210, %211
   br i1 %cmp193, label %land.rhs195, label %land.end198
 
 land.rhs195:                                      ; preds = %land.lhs.true192
-  %212 = load double, double* @yd, align 8
-  %213 = load double, double* @lyd, align 8
+  %212 = load double, ptr @yd, align 8
+  %213 = load double, ptr @lyd, align 8
   %cmp196 = fcmp oeq double %212, %213
   br label %land.end198
 
 land.end198:                                      ; preds = %land.rhs195, %land.lhs.true192, %land.end186
   %214 = phi i1 [ false, %land.lhs.true192 ], [ false, %land.end186 ], [ %cmp196, %land.rhs195 ]
   %land.ext199 = zext i1 %214 to i32
-  %call200 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @.str2, i32 0, i32 0), double %202, double %203, double %204, double %205, double %206, double %207, i32 %land.ext199)
+  %call200 = call i32 (ptr, ...) @printf(ptr @.str2, double %202, double %203, double %204, double %205, double %206, double %207, i32 %land.ext199)
   call void @clear()
-  store float 4.500000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
-  store float 7.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+  store float 4.500000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  store float 7.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %call201 = call { float, float } @sc_v()
   %215 = extractvalue { float, float } %call201, 0
   %216 = extractvalue { float, float } %call201, 1
-  store float %215, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
-  store float %216, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
-  %ret_sc.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
-  %ret_sc.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+  store float %215, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  store float %216, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
+  %ret_sc.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %conv202 = fpext float %ret_sc.real to double
   %conv203 = fpext float %ret_sc.imag to double
-  %ret_sc.real204 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
-  %ret_sc.imag205 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+  %ret_sc.real204 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.imag205 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %conv206 = fpext float %ret_sc.real204 to double
   %conv207 = fpext float %ret_sc.imag205 to double
-  %lret_sc.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
-  %lret_sc.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+  %lret_sc.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %conv208 = fpext float %lret_sc.real to double
   %conv209 = fpext float %lret_sc.imag to double
-  %lret_sc.real210 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
-  %lret_sc.imag211 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+  %lret_sc.real210 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.imag211 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %conv212 = fpext float %lret_sc.real210 to double
   %conv213 = fpext float %lret_sc.imag211 to double
-  %ret_sc.real214 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
-  %ret_sc.imag215 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
-  %lret_sc.real216 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
-  %lret_sc.imag217 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+  %ret_sc.real214 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.imag215 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
+  %lret_sc.real216 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.imag217 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %cmp.r = fcmp oeq float %ret_sc.real214, %lret_sc.real216
   %cmp.i = fcmp oeq float %ret_sc.imag215, %lret_sc.imag217
   %and.ri = and i1 %cmp.r, %cmp.i
   %conv218 = zext i1 %and.ri to i32
-  %call219 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str3, i32 0, i32 0), double %conv202, double %conv207, double %conv208, double %conv213, i32 %conv218)
+  %call219 = call i32 (ptr, ...) @printf(ptr @.str3, double %conv202, double %conv207, double %conv208, double %conv213, i32 %conv218)
   call void @clear()
-  store float 0x3FF7A99300000000, float* @lx, align 4
-  store float 4.500000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
-  store float 7.000000e+00, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
-  %217 = load float, float* @lx, align 4
+  store float 0x3FF7A99300000000, ptr @lx, align 4
+  store float 4.500000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  store float 7.000000e+00, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
+  %217 = load float, ptr @lx, align 4
   %call220 = call { float, float } @sc_sf(float %217)
   %218 = extractvalue { float, float } %call220, 0
   %219 = extractvalue { float, float } %call220, 1
-  store float %218, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
-  store float %219, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
-  %ret_sc.real221 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
-  %ret_sc.imag222 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+  store float %218, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  store float %219, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
+  %ret_sc.real221 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.imag222 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %conv223 = fpext float %ret_sc.real221 to double
   %conv224 = fpext float %ret_sc.imag222 to double
-  %ret_sc.real225 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
-  %ret_sc.imag226 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
+  %ret_sc.real225 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.imag226 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
   %conv227 = fpext float %ret_sc.real225 to double
   %conv228 = fpext float %ret_sc.imag226 to double
-  %lret_sc.real229 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
-  %lret_sc.imag230 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+  %lret_sc.real229 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.imag230 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %conv231 = fpext float %lret_sc.real229 to double
   %conv232 = fpext float %lret_sc.imag230 to double
-  %lret_sc.real233 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
-  %lret_sc.imag234 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+  %lret_sc.real233 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.imag234 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %conv235 = fpext float %lret_sc.real233 to double
   %conv236 = fpext float %lret_sc.imag234 to double
-  %220 = load float, float* @x, align 4
+  %220 = load float, ptr @x, align 4
   %conv237 = fpext float %220 to double
-  %221 = load float, float* @lx, align 4
+  %221 = load float, ptr @lx, align 4
   %conv238 = fpext float %221 to double
-  %ret_sc.real239 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 0)
-  %ret_sc.imag240 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @ret_sc, i32 0, i32 1)
-  %lret_sc.real241 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 0)
-  %lret_sc.imag242 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @lret_sc, i32 0, i32 1)
+  %ret_sc.real239 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 0)
+  %ret_sc.imag240 = load float, ptr getelementptr inbounds ({ float, float }, ptr @ret_sc, i32 0, i32 1)
+  %lret_sc.real241 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 0)
+  %lret_sc.imag242 = load float, ptr getelementptr inbounds ({ float, float }, ptr @lret_sc, i32 0, i32 1)
   %cmp.r243 = fcmp oeq float %ret_sc.real239, %lret_sc.real241
   %cmp.i244 = fcmp oeq float %ret_sc.imag240, %lret_sc.imag242
   %and.ri245 = and i1 %cmp.r243, %cmp.i244
   br i1 %and.ri245, label %land.rhs247, label %land.end250
 
 land.rhs247:                                      ; preds = %land.end198
-  %222 = load float, float* @x, align 4
-  %223 = load float, float* @lx, align 4
+  %222 = load float, ptr @x, align 4
+  %223 = load float, ptr @lx, align 4
   %cmp248 = fcmp oeq float %222, %223
   br label %land.end250
 
 land.end250:                                      ; preds = %land.rhs247, %land.end198
   %224 = phi i1 [ false, %land.end198 ], [ %cmp248, %land.rhs247 ]
   %land.ext251 = zext i1 %224 to i32
-  %call252 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str4, i32 0, i32 0), double %conv223, double %conv228, double %conv231, double %conv236, double %conv237, double %conv238, i32 %land.ext251)
+  %call252 = call i32 (ptr, ...) @printf(ptr @.str4, double %conv223, double %conv228, double %conv231, double %conv236, double %conv237, double %conv238, i32 %land.ext251)
   call void @clear()
-  store double 1.234500e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
-  store double 7.677000e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
+  store double 1.234500e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  store double 7.677000e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
   %call253 = call { double, double } @dc_v()
   %225 = extractvalue { double, double } %call253, 0
   %226 = extractvalue { double, double } %call253, 1
-  store double %225, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
-  store double %226, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
-  %ret_dc.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
-  %ret_dc.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
-  %ret_dc.real254 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
-  %ret_dc.imag255 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
-  %lret_dc.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
-  %lret_dc.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
-  %lret_dc.real256 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
-  %lret_dc.imag257 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
-  %ret_dc.real258 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
-  %ret_dc.imag259 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
-  %lret_dc.real260 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
-  %lret_dc.imag261 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+  store double %225, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  store double %226, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+  %ret_dc.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+  %ret_dc.real254 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.imag255 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+  %lret_dc.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+  %lret_dc.real256 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.imag257 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+  %ret_dc.real258 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.imag259 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+  %lret_dc.real260 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.imag261 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
   %cmp.r262 = fcmp oeq double %ret_dc.real258, %lret_dc.real260
   %cmp.i263 = fcmp oeq double %ret_dc.imag259, %lret_dc.imag261
   %and.ri264 = and i1 %cmp.r262, %cmp.i263
   %conv265 = zext i1 %and.ri264 to i32
-  %call266 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @.str3, i32 0, i32 0), double %ret_dc.real, double %ret_dc.imag255, double %lret_dc.real, double %lret_dc.imag257, i32 %conv265)
+  %call266 = call i32 (ptr, ...) @printf(ptr @.str3, double %ret_dc.real, double %ret_dc.imag255, double %lret_dc.real, double %lret_dc.imag257, i32 %conv265)
   call void @clear()
-  store double 0x40AAF6F532617C1C, double* @lxd, align 8
-  store double 4.444500e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
-  store double 7.888000e+03, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
-  %227 = load float, float* @lx, align 4
+  store double 0x40AAF6F532617C1C, ptr @lxd, align 8
+  store double 4.444500e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  store double 7.888000e+03, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+  %227 = load float, ptr @lx, align 4
   %call267 = call { double, double } @dc_sf(float %227)
   %228 = extractvalue { double, double } %call267, 0
   %229 = extractvalue { double, double } %call267, 1
-  store double %228, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
-  store double %229, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
-  %ret_dc.real268 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
-  %ret_dc.imag269 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
-  %ret_dc.real270 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
-  %ret_dc.imag271 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
-  %lret_dc.real272 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
-  %lret_dc.imag273 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
-  %lret_dc.real274 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
-  %lret_dc.imag275 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
-  %230 = load float, float* @x, align 4
+  store double %228, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  store double %229, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+  %ret_dc.real268 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.imag269 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+  %ret_dc.real270 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.imag271 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+  %lret_dc.real272 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.imag273 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+  %lret_dc.real274 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.imag275 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
+  %230 = load float, ptr @x, align 4
   %conv276 = fpext float %230 to double
-  %231 = load float, float* @lx, align 4
+  %231 = load float, ptr @lx, align 4
   %conv277 = fpext float %231 to double
-  %ret_dc.real278 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 0)
-  %ret_dc.imag279 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @ret_dc, i32 0, i32 1)
-  %lret_dc.real280 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 0)
-  %lret_dc.imag281 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @lret_dc, i32 0, i32 1)
+  %ret_dc.real278 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 0)
+  %ret_dc.imag279 = load double, ptr getelementptr inbounds ({ double, double }, ptr @ret_dc, i32 0, i32 1)
+  %lret_dc.real280 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 0)
+  %lret_dc.imag281 = load double, ptr getelementptr inbounds ({ double, double }, ptr @lret_dc, i32 0, i32 1)
   %cmp.r282 = fcmp oeq double %ret_dc.real278, %lret_dc.real280
   %cmp.i283 = fcmp oeq double %ret_dc.imag279, %lret_dc.imag281
   %and.ri284 = and i1 %cmp.r282, %cmp.i283
   br i1 %and.ri284, label %land.rhs286, label %land.end289
 
 land.rhs286:                                      ; preds = %land.end250
-  %232 = load float, float* @x, align 4
-  %233 = load float, float* @lx, align 4
+  %232 = load float, ptr @x, align 4
+  %233 = load float, ptr @lx, align 4
   %cmp287 = fcmp oeq float %232, %233
   br label %land.end289
 
 land.end289:                                      ; preds = %land.rhs286, %land.end250
   %234 = phi i1 [ false, %land.end250 ], [ %cmp287, %land.rhs286 ]
   %land.ext290 = zext i1 %234 to i32
-  %call291 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([24 x i8], [24 x i8]* @.str4, i32 0, i32 0), double %ret_dc.real268, double %ret_dc.imag271, double %lret_dc.real272, double %lret_dc.imag275, double %conv276, double %conv277, i32 %land.ext290)
-  %235 = load i32, i32* %retval
+  %call291 = call i32 (ptr, ...) @printf(ptr @.str4, double %ret_dc.real268, double %ret_dc.imag271, double %lret_dc.real272, double %lret_dc.imag275, double %conv276, double %conv277, i32 %land.ext290)
+  %235 = load i32, ptr %retval
   ret i32 %235
 }
 
@@ -759,7 +759,7 @@ declare void @v_sf(float) #1
 ; stel: jr $25
 ; stel: .end __call_stub_fp_v_sf
 
-declare i32 @printf(i8*, ...) #1
+declare i32 @printf(ptr, ...) #1
 
 declare void @v_df(double) #1
 ; stel: .section .mips16.call.fp.v_df,"ax",@progbits

diff  --git a/llvm/test/CodeGen/Mips/hf16call32_body.ll b/llvm/test/CodeGen/Mips/hf16call32_body.ll
index 420148728a69c..88cc02a9f7952 100644
--- a/llvm/test/CodeGen/Mips/hf16call32_body.ll
+++ b/llvm/test/CodeGen/Mips/hf16call32_body.ll
@@ -13,9 +13,9 @@
 define void @v_sf(float %p) #0 {
 entry:
   %p.addr = alloca float, align 4
-  store float %p, float* %p.addr, align 4
-  %0 = load float, float* %p.addr, align 4
-  store float %0, float* @x, align 4
+  store float %p, ptr %p.addr, align 4
+  %0 = load float, ptr %p.addr, align 4
+  store float %0, ptr @x, align 4
   ret void
 }
 ; stel: .section .mips16.fn.v_sf,"ax",@progbits
@@ -27,15 +27,15 @@ entry:
 ; stel: .set $__fn_local_v_sf, v_sf
 ; stel: .end __fn_stub_v_sf
 
-declare i32 @printf(i8*, ...) #1
+declare i32 @printf(ptr, ...) #1
 
 ; Function Attrs: nounwind
 define void @v_df(double %p) #0 {
 entry:
   %p.addr = alloca double, align 8
-  store double %p, double* %p.addr, align 8
-  %0 = load double, double* %p.addr, align 8
-  store double %0, double* @xd, align 8
+  store double %p, ptr %p.addr, align 8
+  %0 = load double, ptr %p.addr, align 8
+  store double %0, ptr @xd, align 8
   ret void
 }
 
@@ -54,12 +54,12 @@ define void @v_sf_sf(float %p1, float %p2) #0 {
 entry:
   %p1.addr = alloca float, align 4
   %p2.addr = alloca float, align 4
-  store float %p1, float* %p1.addr, align 4
-  store float %p2, float* %p2.addr, align 4
-  %0 = load float, float* %p1.addr, align 4
-  store float %0, float* @x, align 4
-  %1 = load float, float* %p2.addr, align 4
-  store float %1, float* @y, align 4
+  store float %p1, ptr %p1.addr, align 4
+  store float %p2, ptr %p2.addr, align 4
+  %0 = load float, ptr %p1.addr, align 4
+  store float %0, ptr @x, align 4
+  %1 = load float, ptr %p2.addr, align 4
+  store float %1, ptr @y, align 4
   ret void
 }
 
@@ -78,12 +78,12 @@ define void @v_sf_df(float %p1, double %p2) #0 {
 entry:
   %p1.addr = alloca float, align 4
   %p2.addr = alloca double, align 8
-  store float %p1, float* %p1.addr, align 4
-  store double %p2, double* %p2.addr, align 8
-  %0 = load float, float* %p1.addr, align 4
-  store float %0, float* @x, align 4
-  %1 = load double, double* %p2.addr, align 8
-  store double %1, double* @yd, align 8
+  store float %p1, ptr %p1.addr, align 4
+  store double %p2, ptr %p2.addr, align 8
+  %0 = load float, ptr %p1.addr, align 4
+  store float %0, ptr @x, align 4
+  %1 = load double, ptr %p2.addr, align 8
+  store double %1, ptr @yd, align 8
   ret void
 }
 
@@ -103,12 +103,12 @@ define void @v_df_sf(double %p1, float %p2) #0 {
 entry:
   %p1.addr = alloca double, align 8
   %p2.addr = alloca float, align 4
-  store double %p1, double* %p1.addr, align 8
-  store float %p2, float* %p2.addr, align 4
-  %0 = load double, double* %p1.addr, align 8
-  store double %0, double* @xd, align 8
-  %1 = load float, float* %p2.addr, align 4
-  store float %1, float* @y, align 4
+  store double %p1, ptr %p1.addr, align 8
+  store float %p2, ptr %p2.addr, align 4
+  %0 = load double, ptr %p1.addr, align 8
+  store double %0, ptr @xd, align 8
+  %1 = load float, ptr %p2.addr, align 4
+  store float %1, ptr @y, align 4
   ret void
 }
 
@@ -128,12 +128,12 @@ define void @v_df_df(double %p1, double %p2) #0 {
 entry:
   %p1.addr = alloca double, align 8
   %p2.addr = alloca double, align 8
-  store double %p1, double* %p1.addr, align 8
-  store double %p2, double* %p2.addr, align 8
-  %0 = load double, double* %p1.addr, align 8
-  store double %0, double* @xd, align 8
-  %1 = load double, double* %p2.addr, align 8
-  store double %1, double* @yd, align 8
+  store double %p1, ptr %p1.addr, align 8
+  store double %p2, ptr %p2.addr, align 8
+  %0 = load double, ptr %p1.addr, align 8
+  store double %0, ptr @xd, align 8
+  %1 = load double, ptr %p2.addr, align 8
+  store double %1, ptr @yd, align 8
   ret void
 }
 
@@ -152,7 +152,7 @@ entry:
 ; Function Attrs: nounwind
 define float @sf_v() #0 {
 entry:
-  %0 = load float, float* @ret_sf, align 4
+  %0 = load float, ptr @ret_sf, align 4
   ret float %0
 }
 
@@ -160,10 +160,10 @@ entry:
 define float @sf_sf(float %p) #0 {
 entry:
   %p.addr = alloca float, align 4
-  store float %p, float* %p.addr, align 4
-  %0 = load float, float* %p.addr, align 4
-  store float %0, float* @x, align 4
-  %1 = load float, float* @ret_sf, align 4
+  store float %p, ptr %p.addr, align 4
+  %0 = load float, ptr %p.addr, align 4
+  store float %0, ptr @x, align 4
+  %1 = load float, ptr @ret_sf, align 4
   ret float %1
 }
 
@@ -182,10 +182,10 @@ entry:
 define float @sf_df(double %p) #0 {
 entry:
   %p.addr = alloca double, align 8
-  store double %p, double* %p.addr, align 8
-  %0 = load double, double* %p.addr, align 8
-  store double %0, double* @xd, align 8
-  %1 = load float, float* @ret_sf, align 4
+  store double %p, ptr %p.addr, align 8
+  %0 = load double, ptr %p.addr, align 8
+  store double %0, ptr @xd, align 8
+  %1 = load float, ptr @ret_sf, align 4
   ret float %1
 }
 
@@ -204,13 +204,13 @@ define float @sf_sf_sf(float %p1, float %p2) #0 {
 entry:
   %p1.addr = alloca float, align 4
   %p2.addr = alloca float, align 4
-  store float %p1, float* %p1.addr, align 4
-  store float %p2, float* %p2.addr, align 4
-  %0 = load float, float* %p1.addr, align 4
-  store float %0, float* @x, align 4
-  %1 = load float, float* %p2.addr, align 4
-  store float %1, float* @y, align 4
-  %2 = load float, float* @ret_sf, align 4
+  store float %p1, ptr %p1.addr, align 4
+  store float %p2, ptr %p2.addr, align 4
+  %0 = load float, ptr %p1.addr, align 4
+  store float %0, ptr @x, align 4
+  %1 = load float, ptr %p2.addr, align 4
+  store float %1, ptr @y, align 4
+  %2 = load float, ptr @ret_sf, align 4
   ret float %2
 }
 
@@ -229,13 +229,13 @@ define float @sf_sf_df(float %p1, double %p2) #0 {
 entry:
   %p1.addr = alloca float, align 4
   %p2.addr = alloca double, align 8
-  store float %p1, float* %p1.addr, align 4
-  store double %p2, double* %p2.addr, align 8
-  %0 = load float, float* %p1.addr, align 4
-  store float %0, float* @x, align 4
-  %1 = load double, double* %p2.addr, align 8
-  store double %1, double* @yd, align 8
-  %2 = load float, float* @ret_sf, align 4
+  store float %p1, ptr %p1.addr, align 4
+  store double %p2, ptr %p2.addr, align 8
+  %0 = load float, ptr %p1.addr, align 4
+  store float %0, ptr @x, align 4
+  %1 = load double, ptr %p2.addr, align 8
+  store double %1, ptr @yd, align 8
+  %2 = load float, ptr @ret_sf, align 4
   ret float %2
 }
 
@@ -255,13 +255,13 @@ define float @sf_df_sf(double %p1, float %p2) #0 {
 entry:
   %p1.addr = alloca double, align 8
   %p2.addr = alloca float, align 4
-  store double %p1, double* %p1.addr, align 8
-  store float %p2, float* %p2.addr, align 4
-  %0 = load double, double* %p1.addr, align 8
-  store double %0, double* @xd, align 8
-  %1 = load float, float* %p2.addr, align 4
-  store float %1, float* @y, align 4
-  %2 = load float, float* @ret_sf, align 4
+  store double %p1, ptr %p1.addr, align 8
+  store float %p2, ptr %p2.addr, align 4
+  %0 = load double, ptr %p1.addr, align 8
+  store double %0, ptr @xd, align 8
+  %1 = load float, ptr %p2.addr, align 4
+  store float %1, ptr @y, align 4
+  %2 = load float, ptr @ret_sf, align 4
   ret float %2
 }
 
@@ -281,13 +281,13 @@ define float @sf_df_df(double %p1, double %p2) #0 {
 entry:
   %p1.addr = alloca double, align 8
   %p2.addr = alloca double, align 8
-  store double %p1, double* %p1.addr, align 8
-  store double %p2, double* %p2.addr, align 8
-  %0 = load double, double* %p1.addr, align 8
-  store double %0, double* @xd, align 8
-  %1 = load double, double* %p2.addr, align 8
-  store double %1, double* @yd, align 8
-  %2 = load float, float* @ret_sf, align 4
+  store double %p1, ptr %p1.addr, align 8
+  store double %p2, ptr %p2.addr, align 8
+  %0 = load double, ptr %p1.addr, align 8
+  store double %0, ptr @xd, align 8
+  %1 = load double, ptr %p2.addr, align 8
+  store double %1, ptr @yd, align 8
+  %2 = load float, ptr @ret_sf, align 4
   ret float %2
 }
 

diff  --git a/llvm/test/CodeGen/Mips/hf1_body.ll b/llvm/test/CodeGen/Mips/hf1_body.ll
index b6469716176fb..184ea31bddc9d 100644
--- a/llvm/test/CodeGen/Mips/hf1_body.ll
+++ b/llvm/test/CodeGen/Mips/hf1_body.ll
@@ -13,9 +13,9 @@
 define void @v_sf(float %p) #0 {
 entry:
   %p.addr = alloca float, align 4
-  store float %p, float* %p.addr, align 4
-  %0 = load float, float* %p.addr, align 4
-  store float %0, float* @x, align 4
+  store float %p, ptr %p.addr, align 4
+  %0 = load float, ptr %p.addr, align 4
+  store float %0, ptr @x, align 4
   ret void
 }
 ; ALL-LABEL: .ent __fn_stub_v_sf

diff  --git a/llvm/test/CodeGen/Mips/hfptrcall.ll b/llvm/test/CodeGen/Mips/hfptrcall.ll
index 50e8de24d929f..c178b1e26cdce 100644
--- a/llvm/test/CodeGen/Mips/hfptrcall.ll
+++ b/llvm/test/CodeGen/Mips/hfptrcall.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mattr=mips16 -relocation-model=pic < %s | FileCheck %s -check-prefix=picel
 
-@ptrsv = global float ()* @sv, align 4
-@ptrdv = global double ()* @dv, align 4
-@ptrscv = global { float, float } ()* @scv, align 4
-@ptrdcv = global { double, double } ()* @dcv, align 4
+@ptrsv = global ptr @sv, align 4
+@ptrdv = global ptr @dv, align 4
+@ptrscv = global ptr @scv, align 4
+@ptrdcv = global ptr @dcv, align 4
 @x = common global float 0.000000e+00, align 4
 @.str = private unnamed_addr constant [4 x i8] c"%f\0A\00", align 1
 @xd = common global double 0.000000e+00, align 8
@@ -34,11 +34,11 @@ entry:
 define { float, float } @scv() #0 {
 entry:
   %retval = alloca { float, float }, align 4
-  %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
-  %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
-  store float 5.000000e+00, float* %real
-  store float 9.900000e+01, float* %imag
-  %0 = load { float, float }, { float, float }* %retval
+  %real = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 0
+  %imag = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 1
+  store float 5.000000e+00, ptr %real
+  store float 9.900000e+01, ptr %imag
+  %0 = load { float, float }, ptr %retval
   ret { float, float } %0
 }
 
@@ -50,11 +50,11 @@ entry:
 define { double, double } @dcv() #0 {
 entry:
   %retval = alloca { double, double }, align 8
-  %real = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0
-  %imag = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1
-  store double 0x416BC8B0A0000000, double* %real
-  store double 0x41CDCCB763800000, double* %imag
-  %0 = load { double, double }, { double, double }* %retval
+  %real = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 0
+  %imag = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 1
+  store double 0x416BC8B0A0000000, ptr %real
+  store double 0x41CDCCB763800000, ptr %imag
+  %0 = load { double, double }, ptr %retval
   ret { double, double } %0
 }
 
@@ -65,43 +65,43 @@ entry:
 ; Function Attrs: nounwind
 define i32 @main() #0 {
 entry:
-  %0 = load float ()*, float ()** @ptrsv, align 4
+  %0 = load ptr, ptr @ptrsv, align 4
   %call = call float %0()
-  store float %call, float* @x, align 4
-  %1 = load float, float* @x, align 4
+  store float %call, ptr @x, align 4
+  %1 = load float, ptr @x, align 4
   %conv = fpext float %1 to double
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), double %conv)
-  %2 = load double ()*, double ()** @ptrdv, align 4
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str, double %conv)
+  %2 = load ptr, ptr @ptrdv, align 4
   %call2 = call double %2()
-  store double %call2, double* @xd, align 8
-  %3 = load double, double* @xd, align 8
-  %call3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), double %3)
-  %4 = load { float, float } ()*, { float, float } ()** @ptrscv, align 4
+  store double %call2, ptr @xd, align 8
+  %3 = load double, ptr @xd, align 8
+  %call3 = call i32 (ptr, ...) @printf(ptr @.str, double %3)
+  %4 = load ptr, ptr @ptrscv, align 4
   %call4 = call { float, float } %4()
   %5 = extractvalue { float, float } %call4, 0
   %6 = extractvalue { float, float } %call4, 1
-  store float %5, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
-  store float %6, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
-  %xy.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
-  %xy.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
+  store float %5, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+  store float %6, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
+  %xy.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+  %xy.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
   %conv5 = fpext float %xy.real to double
   %conv6 = fpext float %xy.imag to double
-  %xy.real7 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 0)
-  %xy.imag8 = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @xy, i32 0, i32 1)
+  %xy.real7 = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 0)
+  %xy.imag8 = load float, ptr getelementptr inbounds ({ float, float }, ptr @xy, i32 0, i32 1)
   %conv9 = fpext float %xy.real7 to double
   %conv10 = fpext float %xy.imag8 to double
-  %call11 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str1, i32 0, i32 0), double %conv5, double %conv10)
-  %7 = load { double, double } ()*, { double, double } ()** @ptrdcv, align 4
+  %call11 = call i32 (ptr, ...) @printf(ptr @.str1, double %conv5, double %conv10)
+  %7 = load ptr, ptr @ptrdcv, align 4
   %call12 = call { double, double } %7()
   %8 = extractvalue { double, double } %call12, 0
   %9 = extractvalue { double, double } %call12, 1
-  store double %8, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
-  store double %9, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
-  %xyd.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
-  %xyd.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
-  %xyd.real13 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 0)
-  %xyd.imag14 = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @xyd, i32 0, i32 1)
-  %call15 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str1, i32 0, i32 0), double %xyd.real, double %xyd.imag14)
+  store double %8, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+  store double %9, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
+  %xyd.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+  %xyd.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
+  %xyd.real13 = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 0)
+  %xyd.imag14 = load double, ptr getelementptr inbounds ({ double, double }, ptr @xyd, i32 0, i32 1)
+  %call15 = call i32 (ptr, ...) @printf(ptr @.str1, double %xyd.real, double %xyd.imag14)
   ret i32 0
 }
 
@@ -116,7 +116,7 @@ entry:
 ; picel:	lw	${{[0-9]+}}, %got(__mips16_call_stub_dc_0)(${{[0-9]+}})
 
 
-declare i32 @printf(i8*, ...) #1
+declare i32 @printf(ptr, ...) #1
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff  --git a/llvm/test/CodeGen/Mips/i32k.ll b/llvm/test/CodeGen/Mips/i32k.ll
index 57a2e788a301c..130a467433fcd 100644
--- a/llvm/test/CodeGen/Mips/i32k.ll
+++ b/llvm/test/CodeGen/Mips/i32k.ll
@@ -4,14 +4,14 @@
 
 define i32 @main() nounwind {
 entry:
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 1075344593) nounwind
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 1075344593) nounwind
 ; 16:	lw	${{[0-9]+}}, 1f
 ; 16:	b	2f
 ; 16:	.align	2
 ; 16: 1: 	.word	1075344593
 ; 16: 2:
 
-  %call1 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 -1075344593) nounwind
+  %call1 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 -1075344593) nounwind
 
 ; 16:	lw	${{[0-9]+}}, 1f
 ; 16:	b	2f
@@ -21,4 +21,4 @@ entry:
   ret i32 0
 }
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind

diff  --git a/llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll b/llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll
index 0d598abc15608..d1680768a8009 100644
--- a/llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll
+++ b/llvm/test/CodeGen/Mips/indirect-jump-hazard/calls.ll
@@ -25,7 +25,7 @@
 ; RUN:   -mips-tail-calls=1 -mcpu=mips64r6 -mattr=+use-indirect-jump-hazard \
 ; RUN:   -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS64R6
 
-define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
+define void @fooNonTail(ptr nocapture %f1) nounwind {
 ; MIPS32R2-LABEL: fooNonTail:
 ; MIPS32R2:       # %bb.0: # %entry
 ; MIPS32R2-NEXT:    addiu $sp, $sp, -24
@@ -118,7 +118,7 @@ entry:
   ret void
 }
 
-define i32 @fooTail(i32 (i32)* nocapture %f1) nounwind {
+define i32 @fooTail(ptr nocapture %f1) nounwind {
 ; MIPS32R2-LABEL: fooTail:
 ; MIPS32R2:       # %bb.0: # %entry
 ; MIPS32R2-NEXT:    move $25, $4

diff  --git a/llvm/test/CodeGen/Mips/indirect-jump-hazard/jumptables.ll b/llvm/test/CodeGen/Mips/indirect-jump-hazard/jumptables.ll
index efa0759090006..b079169974d8b 100644
--- a/llvm/test/CodeGen/Mips/indirect-jump-hazard/jumptables.ll
+++ b/llvm/test/CodeGen/Mips/indirect-jump-hazard/jumptables.ll
@@ -34,7 +34,7 @@
 @.str.6 = private unnamed_addr constant [2 x i8] c"G\00", align 1
 @.str.7 = private unnamed_addr constant [1 x i8] zeroinitializer, align 1
 
-define i8* @_Z3fooi(i32 signext %Letter) {
+define ptr @_Z3fooi(i32 signext %Letter) {
 ; MIPS32R2-LABEL: _Z3fooi:
 ; MIPS32R2:       # %bb.0: # %entry
 ; MIPS32R2-NEXT:    addiu $sp, $sp, -16
@@ -591,10 +591,10 @@ define i8* @_Z3fooi(i32 signext %Letter) {
 ; PIC-MIPS64R6-NEXT:    jr $ra
 ; PIC-MIPS64R6-NEXT:    daddiu $sp, $sp, 16
 entry:
-  %retval = alloca i8*, align 8
+  %retval = alloca ptr, align 8
   %Letter.addr = alloca i32, align 4
-  store i32 %Letter, i32* %Letter.addr, align 4
-  %0 = load i32, i32* %Letter.addr, align 4
+  store i32 %Letter, ptr %Letter.addr, align 4
+  %0 = load i32, ptr %Letter.addr, align 4
   switch i32 %0, label %sw.epilog [
     i32 0, label %sw.bb
     i32 1, label %sw.bb1
@@ -606,38 +606,38 @@ entry:
   ]
 
 sw.bb:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str, ptr %retval, align 8
   br label %return
 
 sw.bb1:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.1, ptr %retval, align 8
   br label %return
 
 sw.bb2:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.2, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.2, ptr %retval, align 8
   br label %return
 
 sw.bb3:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.3, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.3, ptr %retval, align 8
   br label %return
 
 sw.bb4:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.4, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.4, ptr %retval, align 8
   br label %return
 
 sw.bb5:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.5, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.5, ptr %retval, align 8
   br label %return
 
 sw.bb6:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.6, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.6, ptr %retval, align 8
   br label %return
 
 sw.epilog:
-  store i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str.7, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.7, ptr %retval, align 8
   br label %return
 
 return:
-  %1 = load i8*, i8** %retval, align 8
-  ret i8* %1
+  %1 = load ptr, ptr %retval, align 8
+  ret ptr %1
 }

diff  --git a/llvm/test/CodeGen/Mips/indirect-jump-hazard/long-branch.ll b/llvm/test/CodeGen/Mips/indirect-jump-hazard/long-branch.ll
index fffda991ae4b6..e8771feefad33 100644
--- a/llvm/test/CodeGen/Mips/indirect-jump-hazard/long-branch.ll
+++ b/llvm/test/CodeGen/Mips/indirect-jump-hazard/long-branch.ll
@@ -129,7 +129,7 @@ entry:
   br i1 %cmp, label %end, label %then
 
 then:
-  store i32 1, i32* @x, align 4
+  store i32 1, ptr @x, align 4
   br label %end
 
 end:

diff  --git a/llvm/test/CodeGen/Mips/indirect-jump-hazard/long-calls.ll b/llvm/test/CodeGen/Mips/indirect-jump-hazard/long-calls.ll
index 59a2c3eae6599..d544cdf169fe3 100644
--- a/llvm/test/CodeGen/Mips/indirect-jump-hazard/long-calls.ll
+++ b/llvm/test/CodeGen/Mips/indirect-jump-hazard/long-calls.ll
@@ -12,7 +12,7 @@
 ; RUN:   -verify-machineinstrs | FileCheck -check-prefix=N64 %s
 
 declare void @callee()
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1)
 
 @val = internal unnamed_addr global [20 x i32] zeroinitializer, align 4
 
@@ -93,7 +93,7 @@ define void @caller() {
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    daddiu $sp, $sp, 16
   call void @callee()
-  call void @llvm.memset.p0i8.i32(i8* align 4 bitcast ([20 x i32]* @val to i8*), i8 0, i32 80, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 4 @val, i8 0, i32 80, i1 false)
   ret  void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/indirectcall.ll b/llvm/test/CodeGen/Mips/indirectcall.ll
index 2791cab997027..f3408756d5c46 100644
--- a/llvm/test/CodeGen/Mips/indirectcall.ll
+++ b/llvm/test/CodeGen/Mips/indirectcall.ll
@@ -1,6 +1,6 @@
 ; RUN: llc  < %s -mtriple=mipsel -relocation-model=static -mips-tail-calls=1 | FileCheck %s 
 
-define void @foo0(void (i32)* nocapture %f1) nounwind {
+define void @foo0(ptr nocapture %f1) nounwind {
 entry:
 ; CHECK: jr $25
   tail call void %f1(i32 13) nounwind

diff  --git a/llvm/test/CodeGen/Mips/init-array.ll b/llvm/test/CodeGen/Mips/init-array.ll
index 1f1b4a050d244..86cac996db89a 100644
--- a/llvm/test/CodeGen/Mips/init-array.ll
+++ b/llvm/test/CodeGen/Mips/init-array.ll
@@ -2,7 +2,7 @@
 
 target triple = "mipsel-unknown-linux"
 
-@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @test, i8* null }]
+@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @test, ptr null }]
 ; CHECK: .section
 ; CHECK: .init_array
 ; CHECK-NOT: .ctors

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-assembler-directives.ll b/llvm/test/CodeGen/Mips/inlineasm-assembler-directives.ll
index 9f6f1ebb28588..759e84b02007c 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-assembler-directives.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-assembler-directives.ll
@@ -15,9 +15,9 @@ entry:
 ; CHECK-NEXT: #NO_APP
   %a = alloca i32, align 4
   %b = alloca i32, align 4
-  store i32 20, i32* %a, align 4
-  %0 = load i32, i32* %a, align 4
+  store i32 20, ptr %a, align 4
+  %0 = load i32, ptr %a, align 4
   %1 = call i32 asm sideeffect "addi $$9, $1, 8\0A\09ori $0, $$9, 6", "=r,r,~{$1}"(i32 %0)
-  store i32 %1, i32* %b, align 4
+  store i32 %1, ptr %b, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll
index 2cd2be128db13..aa75a1d0a9bc4 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-R.ll
@@ -2,11 +2,11 @@
 
 @data = global [8193 x i32] zeroinitializer
 
-define void @R(i32 *%p) nounwind {
+define void @R(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: R:
 
-  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(ptr elementtype(i32) @data)
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -16,11 +16,11 @@ entry:
   ret void
 }
 
-define void @R_offset_4(i32 *%p) nounwind {
+define void @R_offset_4(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: R_offset_4:
 
-  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 1))
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -30,11 +30,11 @@ entry:
   ret void
 }
 
-define void @R_offset_254(i32 *%p) nounwind {
+define void @R_offset_254(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: R_offset_254:
 
-  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 63))
+  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 63))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -44,11 +44,11 @@ entry:
   ret void
 }
 
-define void @R_offset_256(i32 *%p) nounwind {
+define void @R_offset_256(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: R_offset_256:
 
-  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 64))
+  call void asm sideeffect "lw $$1, $0", "*R,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 64))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: addiu $[[BASEPTR2:[0-9]+]], $[[BASEPTR]], 256

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll
index 956f3c5288b94..f3431aa4c8251 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-1.ll
@@ -4,11 +4,11 @@
 
 @data = global [8193 x i32] zeroinitializer
 
-define void @ZC(i32 *%p) nounwind {
+define void @ZC(ptr %p) nounwind {
 entry:
   ; ALL-LABEL: ZC:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) @data)
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL: #APP
@@ -18,11 +18,11 @@ entry:
   ret void
 }
 
-define void @ZC_offset_n4(i32 *%p) nounwind {
+define void @ZC_offset_n4(ptr %p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_n4:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 -1))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 -1))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL: #APP
@@ -32,11 +32,11 @@ entry:
   ret void
 }
 
-define void @ZC_offset_4(i32 *%p) nounwind {
+define void @ZC_offset_4(ptr %p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_4:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 1))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL: #APP
@@ -46,11 +46,11 @@ entry:
   ret void
 }
 
-define void @ZC_offset_252(i32 *%p) nounwind {
+define void @ZC_offset_252(ptr %p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_252:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 63))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 63))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL: #APP
@@ -60,11 +60,11 @@ entry:
   ret void
 }
 
-define void @ZC_offset_256(i32 *%p) nounwind {
+define void @ZC_offset_256(ptr %p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_256:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 64))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 64))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
 
@@ -81,11 +81,11 @@ entry:
   ret void
 }
 
-define void @ZC_offset_2044(i32 *%p) nounwind {
+define void @ZC_offset_2044(ptr %p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_2044:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 511))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 511))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
 
@@ -102,11 +102,11 @@ entry:
   ret void
 }
 
-define void @ZC_offset_2048(i32 *%p) nounwind {
+define void @ZC_offset_2048(ptr %p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_2048:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 512))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 512))
 
   ; ALL: lw $[[BASEPTR:[0-9]+]], %got(data)(
 
@@ -124,11 +124,11 @@ entry:
   ret void
 }
 
-define void @ZC_offset_32764(i32 *%p) nounwind {
+define void @ZC_offset_32764(ptr %p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_32764:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8191))
 
   ; ALL-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
 
@@ -146,11 +146,11 @@ entry:
   ret void
 }
 
-define void @ZC_offset_32768(i32 *%p) nounwind {
+define void @ZC_offset_32768(ptr %p) nounwind {
 entry:
   ; ALL-LABEL: ZC_offset_32768:
 
-  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+  call void asm sideeffect "lw $$1, $0", "*^ZC,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8192))
 
   ; ALL-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; ALL-DAG: ori $[[T0:[0-9]+]], $zero, 32768

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll
index c9c94deec6e4f..b05f586b1351b 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-ZC-2.ll
@@ -12,9 +12,8 @@ define i32 @Atomic() {
 ; CHECK-LABEL: Atomic:
 entry:
   %s = alloca %struct.anon, align 4
-  %0 = bitcast %struct.anon* %s to i8*
-  %count = getelementptr inbounds %struct.anon, %struct.anon* %s, i64 0, i32 1
-  store i32 0, i32* %count, align 4
+  %count = getelementptr inbounds %struct.anon, ptr %s, i64 0, i32 1
+  store i32 0, ptr %count, align 4
 ; R6: addiu $[[R0:[0-9a-z]+]], $sp, {{[0-9]+}}
 
 ; ALL: #APP
@@ -27,8 +26,8 @@ entry:
 
 ; ALL: #NO_APP
 
-  %1 = call { i32, i32 } asm sideeffect ".set push\0A.set noreorder\0A1:\0All $0, $2\0Aaddu $1, $0, $3\0Asc $1, $2\0Abeqz $1, 1b\0Aaddu $1, $0, $3\0A.set pop\0A", "=&r,=&r,=*^ZC,Ir,*^ZC,~{memory},~{$1}"(i32* elementtype(i32) %count, i32 10, i32* elementtype(i32) %count)
-  %asmresult1.i = extractvalue { i32, i32 } %1, 1
+  %0 = call { i32, i32 } asm sideeffect ".set push\0A.set noreorder\0A1:\0All $0, $2\0Aaddu $1, $0, $3\0Asc $1, $2\0Abeqz $1, 1b\0Aaddu $1, $0, $3\0A.set pop\0A", "=&r,=&r,=*^ZC,Ir,*^ZC,~{memory},~{$1}"(ptr elementtype(i32) %count, i32 10, ptr elementtype(i32) %count)
+  %asmresult1.i = extractvalue { i32, i32 } %0, 1
   %cmp = icmp ne i32 %asmresult1.i, 10
   %conv = zext i1 %cmp to i32
   %call2 = call i32 @f(i32 signext %conv)

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll
index d48caaabdbc0c..a2a863671d40e 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-m-1.ll
@@ -2,11 +2,11 @@
 
 @data = global [8193 x i32] zeroinitializer
 
-define void @m(i32 *%p) nounwind {
+define void @m(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: m:
 
-  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(ptr elementtype(i32) @data)
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -16,11 +16,11 @@ entry:
   ret void
 }
 
-define void @m_offset_4(i32 *%p) nounwind {
+define void @m_offset_4(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: m_offset_4:
 
-  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 1))
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -30,11 +30,11 @@ entry:
   ret void
 }
 
-define void @m_offset_32764(i32 *%p) nounwind {
+define void @m_offset_32764(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: m_offset_32764:
 
-  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8191))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -44,11 +44,11 @@ entry:
   ret void
 }
 
-define void @m_offset_32768(i32 *%p) nounwind {
+define void @m_offset_32768(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: m_offset_32768:
 
-  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+  call void asm sideeffect "lw $$1, $0", "*m,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8192))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK-DAG: ori $[[T0:[0-9]+]], $zero, 32768

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll
index 0a6994a715bf8..295b93b6f1822 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-m-2.ll
@@ -19,9 +19,9 @@ entry:
 ; CHECK: sw  $[[T3]], 0($[[T1]])
 
   %l1 = alloca i32, align 4
-  call void asm "sw $1, $0", "=*m,r"(i32* elementtype(i32) %l1, i32 %x) nounwind
-  %0 = call i32 asm "lw $0, $1", "=r,*m"(i32* elementtype(i32) %l1) nounwind
-  store i32 %0, i32* @g1, align 4
+  call void asm "sw $1, $0", "=*m,r"(ptr elementtype(i32) %l1, i32 %x) nounwind
+  %0 = call i32 asm "lw $0, $1", "=r,*m"(ptr elementtype(i32) %l1) nounwind
+  store i32 %0, ptr @g1, align 4
   ret i32 %0
 }
 
@@ -55,13 +55,13 @@ entry:
 define void @main() {
 entry:
 ; Second word:
-  tail call void asm sideeffect "    lw    $0, ${1:D}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+  tail call void asm sideeffect "    lw    $0, ${1:D}", "r,*m,~{$11}"(i32 undef, ptr elementtype(i32) getelementptr inbounds ([20 x i32], ptr @b, i32 0, i32 3))
 ; First word. Notice, no 'D':
-  tail call void asm sideeffect "    lw    $0, ${1}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+  tail call void asm sideeffect "    lw    $0, ${1}", "r,*m,~{$11}"(i32 undef, ptr elementtype(i32) getelementptr inbounds ([20 x i32], ptr @b, i32 0, i32 3))
 
 ; High-order part.
-  tail call void asm sideeffect "    lw    $0, ${1:M}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+  tail call void asm sideeffect "    lw    $0, ${1:M}", "r,*m,~{$11}"(i32 undef, ptr elementtype(i32) getelementptr inbounds ([20 x i32], ptr @b, i32 0, i32 3))
 ; Low-order part.
-  tail call void asm sideeffect "    lw    $0, ${1:L}", "r,*m,~{$11}"(i32 undef, i32* elementtype(i32) getelementptr inbounds ([20 x i32], [20 x i32]* @b, i32 0, i32 3))
+  tail call void asm sideeffect "    lw    $0, ${1:L}", "r,*m,~{$11}"(i32 undef, ptr elementtype(i32) getelementptr inbounds ([20 x i32], ptr @b, i32 0, i32 3))
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll
index 157bf6875a73a..550446deb5816 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-o.ll
@@ -2,11 +2,11 @@
 
 @data = global [8193 x i32] zeroinitializer
 
-define void @o(i32 *%p) nounwind {
+define void @o(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: o:
 
-  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 0))
+  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(ptr elementtype(i32) @data)
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -16,11 +16,11 @@ entry:
   ret void
 }
 
-define void @o_offset_4(i32 *%p) nounwind {
+define void @o_offset_4(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: o_offset_4:
 
-  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 1))
+  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 1))
 
   ; CHECK: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -30,11 +30,11 @@ entry:
   ret void
 }
 
-define void @o_offset_32764(i32 *%p) nounwind {
+define void @o_offset_32764(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: o_offset_32764:
 
-  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8191))
+  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8191))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK: #APP
@@ -44,11 +44,11 @@ entry:
   ret void
 }
 
-define void @o_offset_32768(i32 *%p) nounwind {
+define void @o_offset_32768(ptr %p) nounwind {
 entry:
   ; CHECK-LABEL: o_offset_32768:
 
-  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(i32* elementtype(i32) getelementptr inbounds ([8193 x i32], [8193 x i32]* @data, i32 0, i32 8192))
+  call void asm sideeffect "lw $$1, $0", "*o,~{$1}"(ptr elementtype(i32) getelementptr inbounds ([8193 x i32], ptr @data, i32 0, i32 8192))
 
   ; CHECK-DAG: lw $[[BASEPTR:[0-9]+]], %got(data)(
   ; CHECK-DAG: ori $[[T0:[0-9]+]], $zero, 32768

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-r-i1.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-r-i1.ll
index 134d565125596..218991c6b0e9c 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-r-i1.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-r-i1.ll
@@ -4,7 +4,7 @@
 
 define void @b() {
 entry:
-  %0 = load i8, i8* @a, align 1
+  %0 = load i8, ptr @a, align 1
   %tobool = trunc i8 %0 to i1
   call void asm sideeffect "", "Jr,~{$1}"(i1 %tobool)
   ret void

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-constraint-reg.ll b/llvm/test/CodeGen/Mips/inlineasm-constraint-reg.ll
index b4c1587a8fbff..4d3a288875fa5 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-constraint-reg.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-constraint-reg.ll
@@ -39,7 +39,7 @@ entry:
 ; CHECK-NEXT:  mflo ${{[0-9]+}}
   %bosco = alloca i32, align 4
   call i32 asm sideeffect "\09mtlo $3 \0A\09\09madd $1, $2 ", "=l,r,r,r"(i32 7, i32 6, i32 44) nounwind
-  store volatile i32 %4, i32* %bosco, align 4
+  store volatile i32 %4, ptr %bosco, align 4
  
 ; Check the 'l' constraint for 16-bit type.
 ; CHECK:       #APP
@@ -49,7 +49,7 @@ entry:
 ; CHECK-NEXT:  mflo ${{[0-9]+}}
   %bosco16 = alloca i16, align 4
   call i16 asm sideeffect "\09mtlo $3 \0A\09\09madd $1, $2 ", "=l,r,r,r"(i32 7, i32 6, i32 44) nounwind
-  store volatile i16 %5, i16* %bosco16, align 4
+  store volatile i16 %5, ptr %bosco16, align 4
 
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll b/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll
index 0b8e831150e64..88e56a89cd7bb 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll
@@ -168,7 +168,7 @@ entry:
 ; LE32:          or ${{[0-9]+}}, $[[SECOND]], ${{[0-9]+}}
 ; BE32:          or ${{[0-9]+}}, $[[SECOND]], ${{[0-9]+}}
 ; ALL:           #NO_APP
-  %bosco = load i64, i64* getelementptr inbounds (%union.u_tag, %union.u_tag* @uval, i32 0, i32 0), align 8
+  %bosco = load i64, ptr @uval, align 8
   %trunc1 = trunc i64 %bosco to i32
   tail call i32 asm sideeffect "or $0, ${1:D}, $2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind
   ret i32 0
@@ -186,7 +186,7 @@ entry:
 ; LE32:          or ${{[0-9]+}}, $[[FIRST]], ${{[0-9]+}}
 ; BE32:          or ${{[0-9]+}}, $[[SECOND]], ${{[0-9]+}}
 ; ALL:           #NO_APP
-  %bosco = load i64, i64* getelementptr inbounds (%union.u_tag, %union.u_tag* @uval, i32 0, i32 0), align 8
+  %bosco = load i64, ptr @uval, align 8
   %trunc1 = trunc i64 %bosco to i32
   tail call i32 asm sideeffect "or $0, ${1:L}, $2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind
   ret i32 0
@@ -204,7 +204,7 @@ entry:
 ; LE32:          or ${{[0-9]+}}, $[[SECOND]], ${{[0-9]+}}
 ; BE32:          or ${{[0-9]+}}, $[[FIRST]], ${{[0-9]+}}
 ; ALL:           #NO_APP
-  %bosco = load i64, i64* getelementptr inbounds (%union.u_tag, %union.u_tag* @uval, i32 0, i32 0), align 8
+  %bosco = load i64, ptr @uval, align 8
   %trunc1 = trunc i64 %bosco to i32
   tail call i32 asm sideeffect "or $0, ${1:M}, $2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind
   ret i32 0

diff  --git a/llvm/test/CodeGen/Mips/inlineasm-output-template.ll b/llvm/test/CodeGen/Mips/inlineasm-output-template.ll
index e992ddf31dd45..391a4d6c434ee 100644
--- a/llvm/test/CodeGen/Mips/inlineasm-output-template.ll
+++ b/llvm/test/CodeGen/Mips/inlineasm-output-template.ll
@@ -13,7 +13,7 @@ define dso_local i32 @test_inlineasm_c_output_template0() {
 ; CHECK: #TEST baz
 @baz = internal global i32 0, align 4
 define dso_local i32 @test_inlineasm_c_output_template1() {
-  tail call void asm sideeffect "#TEST ${0:c}", "i"(i32* nonnull @baz)
+  tail call void asm sideeffect "#TEST ${0:c}", "i"(ptr nonnull @baz)
   ret i32 42
 }
 

diff  --git a/llvm/test/CodeGen/Mips/insn-zero-size-bb.ll b/llvm/test/CodeGen/Mips/insn-zero-size-bb.ll
index d2124c407a0d8..d5d3ac3e1951a 100644
--- a/llvm/test/CodeGen/Mips/insn-zero-size-bb.ll
+++ b/llvm/test/CodeGen/Mips/insn-zero-size-bb.ll
@@ -8,7 +8,7 @@
 declare i32 @foo(...)
 declare void @bar()
 
-define void @main() personality i8* bitcast (i32 (...)* @foo to i8*) {
+define void @main() personality ptr @foo {
 entry:
   invoke void @bar() #0
           to label %unreachable unwind label %return
@@ -19,8 +19,8 @@ unreachable:
   unreachable
 
 return:
-  %0 = landingpad { i8*, i32 }
-          catch i8* null
+  %0 = landingpad { ptr, i32 }
+          catch ptr null
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/int-to-float-conversion.ll b/llvm/test/CodeGen/Mips/int-to-float-conversion.ll
index d226b48cb20f3..6444d1fe85563 100644
--- a/llvm/test/CodeGen/Mips/int-to-float-conversion.ll
+++ b/llvm/test/CodeGen/Mips/int-to-float-conversion.ll
@@ -3,7 +3,7 @@
 ; RUN: llc -march=mips64el -mcpu=mips64 < %s | FileCheck %s -check-prefix=64
 
 @i1 = global [3 x i32] [i32 1, i32 2, i32 3], align 4
- at i3 = common global i32* null, align 4
+ at i3 = common global ptr null, align 4
 
 ; 32-LABEL: test_float_int_:
 ; 32: mtc1 ${{[0-9]+}}, $f[[R0:[0-9]+]]

diff  --git a/llvm/test/CodeGen/Mips/internalfunc.ll b/llvm/test/CodeGen/Mips/internalfunc.ll
index b6b1c96c5f3be..7db46f54da537 100644
--- a/llvm/test/CodeGen/Mips/internalfunc.ll
+++ b/llvm/test/CodeGen/Mips/internalfunc.ll
@@ -1,10 +1,10 @@
 ; RUN: llc < %s -march=mipsel -relocation-model=pic | FileCheck %s
 
- at caller.sf1 = internal unnamed_addr global void (...)* null, align 4
- at gf1 = external global void (...)*
+ at caller.sf1 = internal unnamed_addr global ptr null, align 4
+ at gf1 = external global ptr
 @.str = private unnamed_addr constant [3 x i8] c"f2\00"
 
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
+define i32 @main(i32 %argc, ptr nocapture %argv) nounwind {
 entry:
 ; CHECK: lw $[[R0:[0-9]+]], %got(f2)
 ; CHECK: addiu $25, $[[R0]], %lo(f2)
@@ -20,7 +20,7 @@ entry:
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
-  %tmp1 = load void (...)*, void (...)** @caller.sf1, align 4
+  %tmp1 = load ptr, ptr @caller.sf1, align 4
   tail call void (...) %tmp1() nounwind
   br label %if.end
 
@@ -30,23 +30,23 @@ if.end:                                           ; preds = %entry, %if.then
 ; CHECK: lw  $[[R3:[0-9]+]], %got(caller.sf1)
 ; CHECK: sw  ${{[0-9]+}}, %lo(caller.sf1)($[[R3]])
   %tobool3 = icmp ne i32 %a0, 0
-  %tmp4 = load void (...)*, void (...)** @gf1, align 4
-  %cond = select i1 %tobool3, void (...)* %tmp4, void (...)* bitcast (void ()* @sf2 to void (...)*)
-  store void (...)* %cond, void (...)** @caller.sf1, align 4
+  %tmp4 = load ptr, ptr @gf1, align 4
+  %cond = select i1 %tobool3, ptr %tmp4, ptr @sf2
+  store ptr %cond, ptr @caller.sf1, align 4
   ret void
 }
 
 define internal void @sf2() nounwind {
 entry:
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0)) nounwind
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str) nounwind
   ret void
 }
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
 
 define internal fastcc void @f2() nounwind noinline {
 entry:
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i32 0, i32 0)) nounwind
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str) nounwind
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/interrupt-attr.ll b/llvm/test/CodeGen/Mips/interrupt-attr.ll
index 80bd148bc86b6..46eb7f54d17ee 100644
--- a/llvm/test/CodeGen/Mips/interrupt-attr.ll
+++ b/llvm/test/CodeGen/Mips/interrupt-attr.ll
@@ -34,7 +34,7 @@ define void @isr_sw0() #0 {
 ; CHECK: sw      $26, [[R3:[0-9]+]]($sp)
 ; CHECK: mfhi    $26
 ; CHECK: sw      $26, [[R4:[0-9]+]]($sp)
-  call void bitcast (void (...)* @write to void ()*)()
+  call void @write()
 ; CHECK: lw      $26, [[R4:[0-9]+]]($sp)
 ; CHECK: mthi    $26
 ; CHECK: lw      $26, [[R3:[0-9]+]]($sp)

diff  --git a/llvm/test/CodeGen/Mips/jtstat.ll b/llvm/test/CodeGen/Mips/jtstat.ll
index 122001d314a72..21d7aba6aaa61 100644
--- a/llvm/test/CodeGen/Mips/jtstat.ll
+++ b/llvm/test/CodeGen/Mips/jtstat.ll
@@ -7,8 +7,8 @@
 define void @test(i32 %i) nounwind {
 entry:
   %i.addr = alloca i32, align 4
-  store i32 %i, i32* %i.addr, align 4
-  %0 = load i32, i32* %i.addr, align 4
+  store i32 %i, ptr %i.addr, align 4
+  %0 = load i32, ptr %i.addr, align 4
   switch i32 %0, label %sw.epilog [
     i32 115, label %sw.bb
     i32 105, label %sw.bb1
@@ -21,35 +21,35 @@ entry:
   ]
 
 sw.bb:                                            ; preds = %entry
-  store i8 115, i8* @c, align 1
+  store i8 115, ptr @c, align 1
   br label %sw.epilog
 
 sw.bb1:                                           ; preds = %entry
-  store i8 105, i8* @c, align 1
+  store i8 105, ptr @c, align 1
   br label %sw.epilog
 
 sw.bb2:                                           ; preds = %entry
-  store i8 100, i8* @c, align 1
+  store i8 100, ptr @c, align 1
   br label %sw.epilog
 
 sw.bb3:                                           ; preds = %entry
-  store i8 108, i8* @c, align 1
+  store i8 108, ptr @c, align 1
   br label %sw.epilog
 
 sw.bb4:                                           ; preds = %entry
-  store i8 99, i8* @c, align 1
+  store i8 99, ptr @c, align 1
   br label %sw.epilog
 
 sw.bb5:                                           ; preds = %entry
-  store i8 68, i8* @c, align 1
+  store i8 68, ptr @c, align 1
   br label %sw.epilog
 
 sw.bb6:                                           ; preds = %entry
-  store i8 81, i8* @c, align 1
+  store i8 81, ptr @c, align 1
   br label %sw.epilog
 
 sw.bb7:                                           ; preds = %entry
-  store i8 76, i8* @c, align 1
+  store i8 76, ptr @c, align 1
   br label %sw.epilog
 
 sw.epilog:                                        ; preds = %entry, %sw.bb7, %sw.bb6, %sw.bb5, %sw.bb4, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb

diff  --git a/llvm/test/CodeGen/Mips/jumptable_labels.ll b/llvm/test/CodeGen/Mips/jumptable_labels.ll
index 8c7edc10689fb..8ae22be9dd23a 100644
--- a/llvm/test/CodeGen/Mips/jumptable_labels.ll
+++ b/llvm/test/CodeGen/Mips/jumptable_labels.ll
@@ -21,12 +21,12 @@
 @.str.6 = private unnamed_addr constant [2 x i8] c"G\00", align 1
 @.str.7 = private unnamed_addr constant [1 x i8] zeroinitializer, align 1
 
-define i8* @_Z3fooi(i32 signext %Letter) {
+define ptr @_Z3fooi(i32 signext %Letter) {
 entry:
-  %retval = alloca i8*, align 8
+  %retval = alloca ptr, align 8
   %Letter.addr = alloca i32, align 4
-  store i32 %Letter, i32* %Letter.addr, align 4
-  %0 = load i32, i32* %Letter.addr, align 4
+  store i32 %Letter, ptr %Letter.addr, align 4
+  %0 = load i32, ptr %Letter.addr, align 4
   switch i32 %0, label %sw.epilog [
     i32 0, label %sw.bb
     i32 1, label %sw.bb1
@@ -38,38 +38,38 @@ entry:
   ]
 
 sw.bb:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str, ptr %retval, align 8
   br label %return
 
 sw.bb1:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.1, ptr %retval, align 8
   br label %return
 
 sw.bb2:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.2, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.2, ptr %retval, align 8
   br label %return
 
 sw.bb3:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.3, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.3, ptr %retval, align 8
   br label %return
 
 sw.bb4:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.4, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.4, ptr %retval, align 8
   br label %return
 
 sw.bb5:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.5, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.5, ptr %retval, align 8
   br label %return
 
 sw.bb6:
-  store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.6, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.6, ptr %retval, align 8
   br label %return
 
 sw.epilog:
-  store i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str.7, i32 0, i32 0), i8** %retval, align 8
+  store ptr @.str.7, ptr %retval, align 8
   br label %return
 
 return:
-  %1 = load i8*, i8** %retval, align 8
-  ret i8* %1
+  %1 = load ptr, ptr %retval, align 8
+  ret ptr %1
 }

diff  --git a/llvm/test/CodeGen/Mips/l3mc.ll b/llvm/test/CodeGen/Mips/l3mc.ll
index ed829cb39c7b0..b452082566285 100644
--- a/llvm/test/CodeGen/Mips/l3mc.ll
+++ b/llvm/test/CodeGen/Mips/l3mc.ll
@@ -42,60 +42,60 @@
 ; Function Attrs: nounwind
 define void @_Z3foov() #0 {
 entry:
-  %0 = load double, double* @d1, align 8
+  %0 = load double, ptr @d1, align 8
   %conv = fptosi double %0 to i64
-  store i64 %conv, i64* @ll1, align 8
-  %1 = load double, double* @d2, align 8
+  store i64 %conv, ptr @ll1, align 8
+  %1 = load double, ptr @d2, align 8
   %conv1 = fptoui double %1 to i64
-  store i64 %conv1, i64* @ull1, align 8
-  %2 = load float, float* @f1, align 4
+  store i64 %conv1, ptr @ull1, align 8
+  %2 = load float, ptr @f1, align 4
   %conv2 = fptosi float %2 to i64
-  store i64 %conv2, i64* @ll2, align 8
-  %3 = load float, float* @f2, align 4
+  store i64 %conv2, ptr @ll2, align 8
+  %3 = load float, ptr @f2, align 4
   %conv3 = fptoui float %3 to i64
-  store i64 %conv3, i64* @ull2, align 8
-  %4 = load double, double* @d3, align 8
+  store i64 %conv3, ptr @ull2, align 8
+  %4 = load double, ptr @d3, align 8
   %conv4 = fptosi double %4 to i32
-  store i32 %conv4, i32* @l1, align 4
-  %5 = load double, double* @d4, align 8
+  store i32 %conv4, ptr @l1, align 4
+  %5 = load double, ptr @d4, align 8
   %conv5 = fptoui double %5 to i32
-  store i32 %conv5, i32* @ul1, align 4
-  %6 = load float, float* @f3, align 4
+  store i32 %conv5, ptr @ul1, align 4
+  %6 = load float, ptr @f3, align 4
   %conv6 = fptosi float %6 to i32
-  store i32 %conv6, i32* @l2, align 4
-  %7 = load float, float* @f4, align 4
+  store i32 %conv6, ptr @l2, align 4
+  %7 = load float, ptr @f4, align 4
   %conv7 = fptoui float %7 to i32
-  store i32 %conv7, i32* @ul2, align 4
+  store i32 %conv7, ptr @ul2, align 4
   ret void
 }
 
 ; Function Attrs: nounwind
 define void @_Z3goov() #0 {
 entry:
-  %0 = load i64, i64* @ll1, align 8
+  %0 = load i64, ptr @ll1, align 8
   %conv = sitofp i64 %0 to double
-  store double %conv, double* @d1, align 8
-  %1 = load i64, i64* @ull1, align 8
+  store double %conv, ptr @d1, align 8
+  %1 = load i64, ptr @ull1, align 8
   %conv1 = uitofp i64 %1 to double
-  store double %conv1, double* @d2, align 8
-  %2 = load i64, i64* @ll2, align 8
+  store double %conv1, ptr @d2, align 8
+  %2 = load i64, ptr @ll2, align 8
   %conv2 = sitofp i64 %2 to float
-  store float %conv2, float* @f1, align 4
-  %3 = load i64, i64* @ull2, align 8
+  store float %conv2, ptr @f1, align 4
+  %3 = load i64, ptr @ull2, align 8
   %conv3 = uitofp i64 %3 to float
-  store float %conv3, float* @f2, align 4
-  %4 = load i32, i32* @l1, align 4
+  store float %conv3, ptr @f2, align 4
+  %4 = load i32, ptr @l1, align 4
   %conv4 = sitofp i32 %4 to double
-  store double %conv4, double* @d3, align 8
-  %5 = load i32, i32* @ul1, align 4
+  store double %conv4, ptr @d3, align 8
+  %5 = load i32, ptr @ul1, align 4
   %conv5 = uitofp i32 %5 to double
-  store double %conv5, double* @d4, align 8
-  %6 = load i32, i32* @l2, align 4
+  store double %conv5, ptr @d4, align 8
+  %6 = load i32, ptr @l2, align 4
   %conv6 = sitofp i32 %6 to float
-  store float %conv6, float* @f3, align 4
-  %7 = load i32, i32* @ul2, align 4
+  store float %conv6, ptr @f3, align 4
+  %7 = load i32, ptr @ul2, align 4
   %conv7 = uitofp i32 %7 to float
-  store float %conv7, float* @f4, align 4
+  store float %conv7, ptr @f4, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/largeimm1.ll b/llvm/test/CodeGen/Mips/largeimm1.ll
index b4d15f9e1e8cb..adef511a4aadd 100644
--- a/llvm/test/CodeGen/Mips/largeimm1.ll
+++ b/llvm/test/CodeGen/Mips/largeimm1.ll
@@ -3,8 +3,8 @@
 define void @f() nounwind {
 entry:
   %a1 = alloca [1073741824 x i8], align 1
-  %arrayidx = getelementptr inbounds [1073741824 x i8], [1073741824 x i8]* %a1, i32 0, i32 1048676
-  call void @f2(i8* %arrayidx) nounwind
+  %arrayidx = getelementptr inbounds [1073741824 x i8], ptr %a1, i32 0, i32 1048676
+  call void @f2(ptr %arrayidx) nounwind
   ret void
 ; CHECK-LABEL: f:
 
@@ -16,4 +16,4 @@ entry:
 ; CHECK: addu   ${{[0-9]+}}, $sp, $[[R2]]
 }
 
-declare void @f2(i8*)
+declare void @f2(ptr)

diff --git a/llvm/test/CodeGen/Mips/largeimmprinting.ll b/llvm/test/CodeGen/Mips/largeimmprinting.ll
index 1d5b9c47b7df2..144e2bc2511cb 100644
--- a/llvm/test/CodeGen/Mips/largeimmprinting.ll
+++ b/llvm/test/CodeGen/Mips/largeimmprinting.ll
@@ -25,12 +25,11 @@ entry:
 ; 64:  sd      $ra, 24($[[R1]])
 
   %agg.tmp = alloca %struct.S1, align 1
-  %tmp = getelementptr inbounds %struct.S1, %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp, i8* align 1 getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i1 false)
-  call void @f2(%struct.S1* byval(%struct.S1) %agg.tmp) nounwind
+  call void @llvm.memcpy.p0.p0.i32(ptr align 1 %agg.tmp, ptr align 1 @s1, i32 65536, i1 false)
+  call void @f2(ptr byval(%struct.S1) %agg.tmp) nounwind
   ret void
 }
 
-declare void @f2(%struct.S1* byval(%struct.S1))
+declare void @f2(ptr byval(%struct.S1))
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind

diff --git a/llvm/test/CodeGen/Mips/lb1.ll b/llvm/test/CodeGen/Mips/lb1.ll
index 1e908b81a8780..caff4c7fa33a4 100644
--- a/llvm/test/CodeGen/Mips/lb1.ll
+++ b/llvm/test/CodeGen/Mips/lb1.ll
@@ -6,13 +6,13 @@
 define i32 @main() nounwind {
 entry:
   %i = alloca i32, align 4
-  %0 = load i8, i8* @c, align 1
+  %0 = load i8, ptr @c, align 1
 ; 16:	lb	${{[0-9]+}}, 0(${{[0-9]+}})
   %conv = sext i8 %0 to i32
-  store i32 %conv, i32* %i, align 4
-  %1 = load i32, i32* %i, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+  store i32 %conv, ptr %i, align 4
+  %1 = load i32, ptr %i, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff --git a/llvm/test/CodeGen/Mips/lbu1.ll b/llvm/test/CodeGen/Mips/lbu1.ll
index 32515411b7d47..13fe20af1aef9 100644
--- a/llvm/test/CodeGen/Mips/lbu1.ll
+++ b/llvm/test/CodeGen/Mips/lbu1.ll
@@ -6,14 +6,14 @@
 define i32 @main() nounwind {
 entry:
   %i = alloca i32, align 4
-  %0 = load i8, i8* @c, align 1
+  %0 = load i8, ptr @c, align 1
   %conv = zext i8 %0 to i32
 ; 16:	lbu	${{[0-9]+}}, 0(${{[0-9]+}})
-  store i32 %conv, i32* %i, align 4
-  %1 = load i8, i8* @c, align 1
+  store i32 %conv, ptr %i, align 4
+  %1 = load i8, ptr @c, align 1
   %conv1 = zext i8 %1 to i32
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %conv1)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %conv1)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff --git a/llvm/test/CodeGen/Mips/lcb2.ll b/llvm/test/CodeGen/Mips/lcb2.ll
index 4987c606e3300..7f9e71d5efecf 100644
--- a/llvm/test/CodeGen/Mips/lcb2.ll
+++ b/llvm/test/CodeGen/Mips/lcb2.ll
@@ -9,13 +9,13 @@
 ; Function Attrs: nounwind optsize
 define i32 @bnez() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
   tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !5
-  store i32 0, i32* @i, align 4, !tbaa !1
+  store i32 0, ptr @i, align 4, !tbaa !1
   br label %if.end
 
 if.end:                                           ; preds = %if.then, %entry
@@ -31,17 +31,17 @@ if.end:                                           ; preds = %if.then, %entry
 ; Function Attrs: nounwind optsize
 define i32 @beqz() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 10, i32* @j, align 4, !tbaa !1
+  store i32 10, ptr @j, align 4, !tbaa !1
   tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !6
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  store i32 55, i32* @j, align 4, !tbaa !1
+  store i32 55, ptr @j, align 4, !tbaa !1
   tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !7
   br label %if.end
 
@@ -60,19 +60,19 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @bteqz() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
-  %1 = load i32, i32* @j, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
+  %1 = load i32, ptr @j, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @k, align 4, !tbaa !1
+  store i32 1, ptr @k, align 4, !tbaa !1
   tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !8
   br label %if.end
 
 if.else:                                          ; preds = %entry
   tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !9
-  store i32 2, i32* @k, align 4, !tbaa !1
+  store i32 2, ptr @k, align 4, !tbaa !1
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
@@ -90,15 +90,15 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @btz() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
-  %1 = load i32, i32* @j, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
+  %1 = load i32, ptr @j, align 4, !tbaa !1
   %cmp1 = icmp sgt i32 %0, %1
   br i1 %cmp1, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry, %if.then
   tail call void asm sideeffect ".space 60000", ""() #1, !srcloc !10
-  %2 = load i32, i32* @i, align 4, !tbaa !1
-  %3 = load i32, i32* @j, align 4, !tbaa !1
+  %2 = load i32, ptr @i, align 4, !tbaa !1
+  %3 = load i32, ptr @j, align 4, !tbaa !1
   %cmp = icmp sgt i32 %2, %3
   br i1 %cmp, label %if.then, label %if.end
 

diff --git a/llvm/test/CodeGen/Mips/lcb3c.ll b/llvm/test/CodeGen/Mips/lcb3c.ll
index a41c73819fe7d..386059f144ddf 100644
--- a/llvm/test/CodeGen/Mips/lcb3c.ll
+++ b/llvm/test/CodeGen/Mips/lcb3c.ll
@@ -7,17 +7,17 @@
 ; Function Attrs: nounwind
 define i32 @s() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 0, i32* @i, align 4
+  store i32 0, ptr @i, align 4
   call void asm sideeffect ".space 1000", ""() #1, !srcloc !1
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  store i32 1, i32* @i, align 4
+  store i32 1, ptr @i, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
@@ -30,17 +30,17 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind
 define i32 @b() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 0, i32* @i, align 4
+  store i32 0, ptr @i, align 4
   call void asm sideeffect ".space 1000000", ""() #1, !srcloc !2
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  store i32 1, i32* @i, align 4
+  store i32 1, ptr @i, align 4
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then

diff --git a/llvm/test/CodeGen/Mips/lcb4a.ll b/llvm/test/CodeGen/Mips/lcb4a.ll
index c4fcbc24022d5..87089a7795582 100644
--- a/llvm/test/CodeGen/Mips/lcb4a.ll
+++ b/llvm/test/CodeGen/Mips/lcb4a.ll
@@ -7,7 +7,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @foo() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -21,7 +21,7 @@ if.else:                                          ; preds = %entry
 
 if.end:                                           ; preds = %if.else, %if.then
   %storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
-  store i32 %storemerge, i32* @i, align 4, !tbaa !1
+  store i32 %storemerge, ptr @i, align 4, !tbaa !1
   ret i32 0
 }
 
@@ -32,7 +32,7 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define i32 @goo() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -46,7 +46,7 @@ if.else:                                          ; preds = %entry
 
 if.end:                                           ; preds = %if.else, %if.then
   %storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
-  store i32 %storemerge, i32* @i, align 4, !tbaa !1
+  store i32 %storemerge, ptr @i, align 4, !tbaa !1
   ret i32 0
 }
 

diff --git a/llvm/test/CodeGen/Mips/lcb5.ll b/llvm/test/CodeGen/Mips/lcb5.ll
index 96e924a44f725..9c12978a88440 100644
--- a/llvm/test/CodeGen/Mips/lcb5.ll
+++ b/llvm/test/CodeGen/Mips/lcb5.ll
@@ -7,7 +7,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @x0() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -21,7 +21,7 @@ if.else:                                          ; preds = %entry
 
 if.end:                                           ; preds = %if.else, %if.then
   %storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
-  store i32 %storemerge, i32* @i, align 4, !tbaa !1
+  store i32 %storemerge, ptr @i, align 4, !tbaa !1
   ret i32 0
 }
 
@@ -33,7 +33,7 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define i32 @x1() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -47,7 +47,7 @@ if.else:                                          ; preds = %entry
 
 if.end:                                           ; preds = %if.else, %if.then
   %storemerge = phi i32 [ 1, %if.else ], [ 0, %if.then ]
-  store i32 %storemerge, i32* @i, align 4, !tbaa !1
+  store i32 %storemerge, ptr @i, align 4, !tbaa !1
   ret i32 0
 }
 
@@ -61,17 +61,17 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define i32 @y0() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 10, i32* @j, align 4, !tbaa !1
+  store i32 10, ptr @j, align 4, !tbaa !1
   tail call void asm sideeffect ".space 1000", ""() #1, !srcloc !9
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  store i32 55, i32* @j, align 4, !tbaa !1
+  store i32 55, ptr @j, align 4, !tbaa !1
   tail call void asm sideeffect ".space 1004", ""() #1, !srcloc !10
   br label %if.end
 
@@ -86,17 +86,17 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define i32 @y1() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 10, i32* @j, align 4, !tbaa !1
+  store i32 10, ptr @j, align 4, !tbaa !1
   tail call void asm sideeffect ".space 1000000", ""() #1, !srcloc !11
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  store i32 55, i32* @j, align 4, !tbaa !1
+  store i32 55, ptr @j, align 4, !tbaa !1
   tail call void asm sideeffect ".space 1000004", ""() #1, !srcloc !12
   br label %if.end
 
@@ -114,19 +114,19 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @z0() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
-  %1 = load i32, i32* @j, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
+  %1 = load i32, ptr @j, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @k, align 4, !tbaa !1
+  store i32 1, ptr @k, align 4, !tbaa !1
   tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !13
   br label %if.end
 
 if.else:                                          ; preds = %entry
   tail call void asm sideeffect ".space 10004", ""() #1, !srcloc !14
-  store i32 2, i32* @k, align 4, !tbaa !1
+  store i32 2, ptr @k, align 4, !tbaa !1
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
@@ -140,19 +140,19 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @z1() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
-  %1 = load i32, i32* @j, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
+  %1 = load i32, ptr @j, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  store i32 1, i32* @k, align 4, !tbaa !1
+  store i32 1, ptr @k, align 4, !tbaa !1
   tail call void asm sideeffect ".space 10000000", ""() #1, !srcloc !15
   br label %if.end
 
 if.else:                                          ; preds = %entry
   tail call void asm sideeffect ".space 10000004", ""() #1, !srcloc !16
-  store i32 2, i32* @k, align 4, !tbaa !1
+  store i32 2, ptr @k, align 4, !tbaa !1
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then
@@ -169,15 +169,15 @@ if.end:                                           ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @z3() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
-  %1 = load i32, i32* @j, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
+  %1 = load i32, ptr @j, align 4, !tbaa !1
   %cmp1 = icmp sgt i32 %0, %1
   br i1 %cmp1, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry, %if.then
   tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !17
-  %2 = load i32, i32* @i, align 4, !tbaa !1
-  %3 = load i32, i32* @j, align 4, !tbaa !1
+  %2 = load i32, ptr @i, align 4, !tbaa !1
+  %3 = load i32, ptr @j, align 4, !tbaa !1
   %cmp = icmp sgt i32 %2, %3
   br i1 %cmp, label %if.then, label %if.end
 
@@ -192,15 +192,15 @@ if.end:                                           ; preds = %if.then, %entry
 ; Function Attrs: nounwind optsize
 define void @z4() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4, !tbaa !1
-  %1 = load i32, i32* @j, align 4, !tbaa !1
+  %0 = load i32, ptr @i, align 4, !tbaa !1
+  %1 = load i32, ptr @j, align 4, !tbaa !1
   %cmp1 = icmp sgt i32 %0, %1
   br i1 %cmp1, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry, %if.then
   tail call void asm sideeffect ".space 10000000", ""() #1, !srcloc !18
-  %2 = load i32, i32* @i, align 4, !tbaa !1
-  %3 = load i32, i32* @j, align 4, !tbaa !1
+  %2 = load i32, ptr @i, align 4, !tbaa !1
+  %3 = load i32, ptr @j, align 4, !tbaa !1
   %cmp = icmp sgt i32 %2, %3
   br i1 %cmp, label %if.then, label %if.end
 

diff --git a/llvm/test/CodeGen/Mips/lh1.ll b/llvm/test/CodeGen/Mips/lh1.ll
index dcab12a38e174..4ec8d928cd063 100644
--- a/llvm/test/CodeGen/Mips/lh1.ll
+++ b/llvm/test/CodeGen/Mips/lh1.ll
@@ -6,13 +6,13 @@
 define i32 @main() nounwind {
 entry:
   %i = alloca i32, align 4
-  %0 = load i16, i16* @s, align 2
+  %0 = load i16, ptr @s, align 2
   %conv = sext i16 %0 to i32
 ; 16:	lh	${{[0-9]+}}, 0(${{[0-9]+}})
-  store i32 %conv, i32* %i, align 4
-  %1 = load i32, i32* %i, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+  store i32 %conv, ptr %i, align 4
+  %1 = load i32, ptr %i, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff --git a/llvm/test/CodeGen/Mips/lhu1.ll b/llvm/test/CodeGen/Mips/lhu1.ll
index 9a52d6fb269fb..1d438b6093de7 100644
--- a/llvm/test/CodeGen/Mips/lhu1.ll
+++ b/llvm/test/CodeGen/Mips/lhu1.ll
@@ -7,13 +7,13 @@
 define i32 @main() nounwind {
 entry:
   %i = alloca i32, align 4
-  %0 = load i16, i16* @s, align 2
+  %0 = load i16, ptr @s, align 2
   %conv = zext i16 %0 to i32
 ; 16:	lhu	${{[0-9]+}}, 0(${{[0-9]+}})
-  store i32 %conv, i32* %i, align 4
-  %1 = load i32, i32* %i, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+  store i32 %conv, ptr %i, align 4
+  %1 = load i32, ptr %i, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff --git a/llvm/test/CodeGen/Mips/llcarry.ll b/llvm/test/CodeGen/Mips/llcarry.ll
index b7cc6fc8ea757..4bba047ba1f0b 100644
--- a/llvm/test/CodeGen/Mips/llcarry.ll
+++ b/llvm/test/CodeGen/Mips/llcarry.ll
@@ -9,10 +9,10 @@
 
 define void @test1() nounwind {
 entry:
-  %0 = load i64, i64* @i, align 8
-  %1 = load i64, i64* @j, align 8
+  %0 = load i64, ptr @i, align 8
+  %1 = load i64, ptr @j, align 8
   %add = add nsw i64 %1, %0
-  store i64 %add, i64* @k, align 8
+  store i64 %add, ptr @k, align 8
 ; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	sltu	${{[0-9]+}}, ${{[0-9]+}}
@@ -23,27 +23,27 @@ entry:
 
 define void @test2() nounwind {
 entry:
-  %0 = load i64, i64* @i, align 8
-  %1 = load i64, i64* @j, align 8
+  %0 = load i64, ptr @i, align 8
+  %1 = load i64, ptr @j, align 8
   %sub = sub nsw i64 %0, %1
 ; 16:	subu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	sltu	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	move	${{[0-9]+}}, $24
 ; 16:	subu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	subu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-  store i64 %sub, i64* @l, align 8
+  store i64 %sub, ptr @l, align 8
   ret void
 }
 
 define void @test3() nounwind {
 entry:
-  %0 = load i64, i64* @ii, align 8
+  %0 = load i64, ptr @ii, align 8
   %add = add nsw i64 %0, 15
 ; 16:	addiu	${{[0-9]+}}, 15
 ; 16:	sltu	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	move	${{[0-9]+}}, $24
 ; 16:	addu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-  store i64 %add, i64* @m, align 8
+  store i64 %add, ptr @m, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/addrspacecast.ll b/llvm/test/CodeGen/Mips/llvm-ir/addrspacecast.ll
index 060fa4ce7bb14..bddbdc667bd6e 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/addrspacecast.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/addrspacecast.ll
@@ -1,11 +1,11 @@
 ; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s -check-prefix=ALL
 
 ; Address spaces 1-255 are software defined.
-define i32* @cast(i32 *%arg) {
-  %1 = addrspacecast i32* %arg to i32 addrspace(1)*
-  %2 = addrspacecast i32 addrspace(1)* %1 to i32 addrspace(2)*
-  %3 = addrspacecast i32 addrspace(2)* %2 to i32 addrspace(0)*
-  ret i32* %3
+define ptr @cast(ptr %arg) {
+  %1 = addrspacecast ptr %arg to ptr addrspace(1)
+  %2 = addrspacecast ptr addrspace(1) %1 to ptr addrspace(2)
+  %3 = addrspacecast ptr addrspace(2) %2 to ptr addrspace(0)
+  ret ptr %3
 }
 
 ; ALL-LABEL: cast:

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/atomicrmx.ll b/llvm/test/CodeGen/Mips/llvm-ir/atomicrmx.ll
index 9069a6f2d13f3..2e03ee9868a97 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/atomicrmx.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/atomicrmx.ll
@@ -12,7 +12,7 @@ define i32 @ll_sc(i32 signext %x) {
 
 ;CHK32:  LL_R6
 ;CHK32:  SC_R6
-  %1 = atomicrmw add i32* @a, i32 %x monotonic
+  %1 = atomicrmw add ptr @a, i32 %x monotonic
   ret i32 %1
 }
 
@@ -21,6 +21,6 @@ define i64 @lld_scd(i64 signext %x) {
 
 ;CHK64:  LLD_R6
 ;CHK64:  SCD_R6
-  %1 = atomicrmw add i64* @b, i64 %x monotonic
+  %1 = atomicrmw add ptr @b, i64 %x monotonic
   ret i64 %1
 }

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/call.ll b/llvm/test/CodeGen/Mips/llvm-ir/call.ll
index d78f1b5521ddb..a259bdd04c2e4 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/call.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/call.ll
@@ -80,7 +80,7 @@ define float @call_float_void() {
   ret float %2
 }
 
-define i32 @indirect_call_void_void(void ()* %addr) {
+define i32 @indirect_call_void_void(ptr %addr) {
 ; ALL-LABEL: indirect_call_void_void:
 
 ; ALL:           move $25, $4
@@ -92,7 +92,7 @@ define i32 @indirect_call_void_void(void ()* %addr) {
   ret i32 0
 }
 
-define i32 @indirect_call_i32_void(i32 ()* %addr) {
+define i32 @indirect_call_i32_void(ptr %addr) {
 ; ALL-LABEL: indirect_call_i32_void:
 
 ; ALL:           move $25, $4
@@ -106,7 +106,7 @@ define i32 @indirect_call_i32_void(i32 ()* %addr) {
   ret i32 %2
 }
 
-define float @indirect_call_float_void(float ()* %addr) {
+define float @indirect_call_float_void(ptr %addr) {
 ; ALL-LABEL: indirect_call_float_void:
 
 ; ALL:           move $25, $4
@@ -122,7 +122,7 @@ define float @indirect_call_float_void(float ()* %addr) {
 
 ; We can't use 'musttail' here because the verifier is too conservative and
 ; prohibits any prototype difference.
-define void @tail_indirect_call_void_void(void ()* %addr) {
+define void @tail_indirect_call_void_void(ptr %addr) {
 ; ALL-LABEL: tail_indirect_call_void_void:
 
 ; ALL:           move $25, $4
@@ -133,7 +133,7 @@ define void @tail_indirect_call_void_void(void ()* %addr) {
   ret void
 }
 
-define i32 @tail_indirect_call_i32_void(i32 ()* %addr) {
+define i32 @tail_indirect_call_i32_void(ptr %addr) {
 ; ALL-LABEL: tail_indirect_call_i32_void:
 
 ; ALL:           move $25, $4
@@ -144,7 +144,7 @@ define i32 @tail_indirect_call_i32_void(i32 ()* %addr) {
   ret i32 %1
 }
 
-define float @tail_indirect_call_float_void(float ()* %addr) {
+define float @tail_indirect_call_float_void(ptr %addr) {
 ; ALL-LABEL: tail_indirect_call_float_void:
 
 ; ALL:           move $25, $4
@@ -181,7 +181,7 @@ define i32 @jal_only_allows_symbols() {
 ; R6C:           jalrc $[[TGT]]
 ; ALL-NOT:       {{jal }}
 
-  call void () inttoptr (i32 1234 to void ()*)()
+  call void () inttoptr (i32 1234 to ptr)()
 ; R6C:           jrc $ra
   ret i32 0
 }

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/indirectbr.ll b/llvm/test/CodeGen/Mips/llvm-ir/indirectbr.ll
index 8fed32aee9be6..aebeac9e5bd21 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/indirectbr.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/indirectbr.ll
@@ -12,7 +12,7 @@
 ; RUN: llc -march=mips64 -mcpu=mips64r5 -asm-show-inst < %s | FileCheck %s -check-prefixes=ALL,NOT-R6
 ; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s -check-prefixes=ALL,R6
 
-define i32 @br(i8 *%addr) {
+define i32 @br(ptr %addr) {
 ; ALL-LABEL: br:
 ; NOT-R6:        jr $4 # <MCInst #{{[0-9]+}} JR
 ; R6C:           jrc $4 # <MCInst #{{[0-9]+}} JIC
@@ -31,7 +31,7 @@ define i32 @br(i8 *%addr) {
 ; ALL:           addiu $2, $zero, 1
 
 entry:
-  indirectbr i8* %addr, [label %L1, label %L2]
+  indirectbr ptr %addr, [label %L1, label %L2]
 
 L1:
   ret i32 0

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll b/llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll
index 192e10ae8fc7d..4e434617d39ef 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/lh_lhu.ll
@@ -8,7 +8,7 @@ define i32 @lhfunc() {
 entry:
 ; CHECK-LABEL: lhfunc
 ; CHECK: lh $[[REG1:[0-9]+]], 0(${{[0-9]+}})
-  %0 = load i16, i16* @us, align 2
+  %0 = load i16, ptr @us, align 2
   %conv = sext i16 %0 to i32
   ret i32 %conv
 }
@@ -17,7 +17,7 @@ define i16 @lhfunc_atomic() {
 entry:
 ; CHECK-LABEL: lhfunc_atomic
 ; CHECK: lh $[[REG1:[0-9]+]], 0(${{[0-9]+}})
-  %0 = load atomic i16, i16* @us acquire, align 2
+  %0 = load atomic i16, ptr @us acquire, align 2
   ret i16 %0
 }
 
@@ -25,7 +25,7 @@ define i32 @lhufunc() {
 entry:
 ; CHECK-LABEL: lhufunc
 ; CHECK: lhu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
-  %0 = load i16, i16* @us, align 2
+  %0 = load i16, ptr @us, align 2
   %conv = zext i16 %0 to i32
   ret i32 %conv
 }

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/load-atomic.ll b/llvm/test/CodeGen/Mips/llvm-ir/load-atomic.ll
index baf9a74a2c54a..f401b0a1d6c9e 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/load-atomic.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/load-atomic.ll
@@ -5,38 +5,38 @@
 ; RUN: llc -march=mips64 -mcpu=mips64r6 < %s | \
 ; RUN:    FileCheck %s -check-prefixes=ALL,M64
 
-define i8 @load_i8(i8* %ptr) {
+define i8 @load_i8(ptr %ptr) {
 ; ALL-LABEL: load_i8
 
 ; ALL: lb $2, 0($4)
 ; ALL: sync
-  %val = load atomic i8, i8* %ptr acquire, align 1
+  %val = load atomic i8, ptr %ptr acquire, align 1
   ret i8 %val
 }
 
-define i16 @load_i16(i16* %ptr) {
+define i16 @load_i16(ptr %ptr) {
 ; ALL-LABEL: load_i16
 
 ; ALL: lh $2, 0($4)
 ; ALL: sync
-  %val = load atomic i16, i16* %ptr acquire, align 2
+  %val = load atomic i16, ptr %ptr acquire, align 2
   ret i16 %val
 }
 
-define i32 @load_i32(i32* %ptr) {
+define i32 @load_i32(ptr %ptr) {
 ; ALL-LABEL: load_i32
 
 ; ALL: lw $2, 0($4)
 ; ALL: sync
-  %val = load atomic i32, i32* %ptr acquire, align 4
+  %val = load atomic i32, ptr %ptr acquire, align 4
   ret i32 %val
 }
 
-define i64 @load_i64(i64* %ptr) {
+define i64 @load_i64(ptr %ptr) {
 ; M64-LABEL: load_i64
 
 ; M64: ld $2, 0($4)
 ; M64: sync
-  %val = load atomic i64, i64* %ptr acquire, align 8
+  %val = load atomic i64, ptr %ptr acquire, align 8
   ret i64 %val
 }

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/load.ll b/llvm/test/CodeGen/Mips/llvm-ir/load.ll
index c02e499adb0f3..b96bdff227cae 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/load.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/load.ll
@@ -181,7 +181,7 @@ define i8 @f1() {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(a))>>
 entry:
-  %0 = load i8, i8 * @a
+  %0 = load i8, ptr @a
   ret i8 %0
 }
 
@@ -344,7 +344,7 @@ define i32 @f2() {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(a))>>
 entry:
-  %0 = load i8, i8 * @a
+  %0 = load i8, ptr @a
   %1 = sext i8 %0 to i32
   ret i32 %1
 }
@@ -508,7 +508,7 @@ define i16 @f3() {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(b))>>
 entry:
-  %0 = load i16, i16 * @b
+  %0 = load i16, ptr @b
   ret i16 %0
 }
 
@@ -671,7 +671,7 @@ define i32 @f4() {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(b))>>
 entry:
-  %0 = load i16, i16 * @b
+  %0 = load i16, ptr @b
   %1 = sext i16 %0 to i32
   ret i32 %1
 }
@@ -835,7 +835,7 @@ define i32 @f5() {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(c))>>
 entry:
-  %0 = load i32, i32 * @c
+  %0 = load i32, ptr @c
   ret i32 %0
 }
 
@@ -1019,7 +1019,7 @@ define i64 @f6() {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Imm:0>>
 entry:
-  %0 = load i32, i32 * @c
+  %0 = load i32, ptr @c
   %1 = zext i32 %0 to i64
   ret i64 %1
 }
@@ -1207,7 +1207,7 @@ define i64 @f7() {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Imm:31>>
 entry:
-  %0 = load i32, i32 * @c
+  %0 = load i32, ptr @c
   %1 = sext i32 %0 to i64
   ret i64 %1
 }
@@ -1371,7 +1371,7 @@ define float @f8() {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(e))>>
 entry:
-  %0 = load float, float * @e
+  %0 = load float, ptr @e
   ret float %0
 }
 
@@ -1534,6 +1534,6 @@ define double @f9() {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(f))>>
 entry:
-  %0 = load double, double * @f
+  %0 = load double, ptr @f
   ret double %0
 }

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/select-int.ll b/llvm/test/CodeGen/Mips/llvm-ir/select-int.ll
index 01c046feec879..8bf83c5c18ce7 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/select-int.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/select-int.ll
@@ -212,7 +212,7 @@ entry:
   ret i64 %r
 }
 
-define i8* @tst_select_word_cst(i8* %a, i8* %b) {
+define ptr @tst_select_word_cst(ptr %a, ptr %b) {
   ; ALL-LABEL: tst_select_word_cst:
 
   ; M2:         addiu   $[[T0:[0-9]+]], $zero, -1
@@ -268,7 +268,7 @@ define i8* @tst_select_word_cst(i8* %a, i8* %b) {
   ; MM32R6:     sltu    $[[T2:[0-9]+]], $zero, $[[T1]]
   ; MM32R6:     seleqz  $2, $4, $[[T2]]
 
-  %cmp = icmp eq i8* %b, inttoptr (i64 -1 to i8*)
-  %r = select i1 %cmp, i8* %a, i8* null
-  ret i8* %r
+  %cmp = icmp eq ptr %b, inttoptr (i64 -1 to ptr)
+  %r = select i1 %cmp, ptr %a, ptr null
+  ret ptr %r
 }

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/store-atomic.ll b/llvm/test/CodeGen/Mips/llvm-ir/store-atomic.ll
index 8624cf6c1c66c..09713805c101c 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/store-atomic.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/store-atomic.ll
@@ -5,38 +5,38 @@
 ; RUN: llc -march=mips64 -mcpu=mips64r6 < %s | \
 ; RUN:    FileCheck %s -check-prefixes=ALL,M64
 
-define void @store_i8(i8* %ptr, i8 signext %v) {
+define void @store_i8(ptr %ptr, i8 signext %v) {
 ; ALL-LABEL: store_i8
 
 ; ALL: sync
 ; ALL: sb $5, 0($4)
-  store atomic i8 %v, i8* %ptr release, align 1
+  store atomic i8 %v, ptr %ptr release, align 1
   ret void
 }
 
-define void @store_i16(i16* %ptr, i16 signext %v) {
+define void @store_i16(ptr %ptr, i16 signext %v) {
 ; ALL-LABEL: store_i16
 
 ; ALL: sync
 ; ALL: sh $5, 0($4)
-  store atomic i16 %v, i16* %ptr release, align 2
+  store atomic i16 %v, ptr %ptr release, align 2
   ret void
 }
 
-define void @store_i32(i32* %ptr, i32 signext %v) {
+define void @store_i32(ptr %ptr, i32 signext %v) {
 ; ALL-LABEL: store_i32
 
 ; ALL: sync
 ; ALL: sw $5, 0($4)
-  store atomic i32 %v, i32* %ptr release, align 4
+  store atomic i32 %v, ptr %ptr release, align 4
   ret void
 }
 
-define void @store_i64(i64* %ptr, i64 %v) {
+define void @store_i64(ptr %ptr, i64 %v) {
 ; M64-LABEL: store_i64
 
 ; M64: sync
 ; M64: sd $5, 0($4)
-  store atomic i64 %v, i64* %ptr release, align 8
+  store atomic i64 %v, ptr %ptr release, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/llvm-ir/store.ll b/llvm/test/CodeGen/Mips/llvm-ir/store.ll
index 11e923a2dafab..3922db72f2a7c 100644
--- a/llvm/test/CodeGen/Mips/llvm-ir/store.ll
+++ b/llvm/test/CodeGen/Mips/llvm-ir/store.ll
@@ -151,7 +151,7 @@ define void @f1(i8 %a) {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(a))>>
-  store i8 %a, i8 * @a
+  store i8 %a, ptr @a
   ret void
 }
 
@@ -285,7 +285,7 @@ define void @f2(i16 %a) {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(b))>>
-  store i16 %a, i16 * @b
+  store i16 %a, ptr @b
   ret void
 }
 
@@ -419,7 +419,7 @@ define void @f3(i32 %a) {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(c))>>
-  store i32 %a, i32 * @c
+  store i32 %a, ptr @c
   ret void
 }
 
@@ -601,7 +601,7 @@ define void @f4(i64 %a) {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Imm:4>>
-  store i64 %a, i64 * @d
+  store i64 %a, ptr @d
   ret void
 }
 
@@ -735,7 +735,7 @@ define void @f5(float %e) {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(e))>>
-  store float %e, float * @e
+  store float %e, ptr @e
   ret void
 }
 
@@ -869,6 +869,6 @@ define void @f6(double %f) {
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Reg:{{[0-9]+}}>
 ; MIPS32R5FP643-NEXT:    # <MCOperand Expr:(%lo(f))>>
-  store double %f, double * @f
+  store double %f, ptr @f
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/long-calls.ll b/llvm/test/CodeGen/Mips/long-calls.ll
index 0b1a76d564a58..99fce03cd0059 100644
--- a/llvm/test/CodeGen/Mips/long-calls.ll
+++ b/llvm/test/CodeGen/Mips/long-calls.ll
@@ -17,7 +17,7 @@
 ; RUN:   | FileCheck -check-prefix=ON64 %s
 
 declare void @callee()
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1)
 
 @val = internal unnamed_addr global [20 x i32] zeroinitializer, align 4
 
@@ -52,6 +52,6 @@ define void @caller() {
 ; ON64: jalr    $25
 
   call void @callee()
-  call void @llvm.memset.p0i8.i32(i8* align 4 bitcast ([20 x i32]* @val to i8*), i8 0, i32 80, i1 false)
+  call void @llvm.memset.p0.i32(ptr align 4 @val, i8 0, i32 80, i1 false)
   ret  void
 }

diff --git a/llvm/test/CodeGen/Mips/longbranch.ll b/llvm/test/CodeGen/Mips/longbranch.ll
index 43eaa50daf050..d348f03295811 100644
--- a/llvm/test/CodeGen/Mips/longbranch.ll
+++ b/llvm/test/CodeGen/Mips/longbranch.ll
@@ -313,7 +313,7 @@ entry:
   br i1 %cmp, label %end, label %then
 
 then:
-  store i32 1, i32* @x, align 4
+  store i32 1, ptr @x, align 4
   br label %end
 
 end:

diff --git a/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-1.ll b/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-1.ll
index e52051192a87d..8b1c62933b0ee 100644
--- a/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-1.ll
+++ b/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-1.ll
@@ -14,24 +14,24 @@ define i32 @boo1(i32 signext %argc) {
 entry:
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  store i32 %argc, i32* %argc.addr, align 4
+  store i32 0, ptr %retval, align 4
+  store i32 %argc, ptr %argc.addr, align 4
   call void asm sideeffect "test_label_1:", "~{$1}"()
-  %0 = load i32, i32* %argc.addr, align 4
+  %0 = load i32, ptr %argc.addr, align 4
   %cmp = icmp sgt i32 %0, 1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
   call void asm sideeffect ".space 68435052", "~{$1}"()
-  %call = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
-  store i32 %call, i32* %retval, align 4
+  %call = call i32 @foo()
+  store i32 %call, ptr %retval, align 4
   br label %return
 
 if.end:
-  store i32 0, i32* %retval, align 4
+  store i32 0, ptr %retval, align 4
   br label %return
 
 return:
-  %1 = load i32, i32* %retval, align 4
+  %1 = load i32, ptr %retval, align 4
   ret i32 %1
 }

diff --git a/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-2.ll b/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-2.ll
index 7d843300fb4f6..6e897feefbec1 100644
--- a/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-2.ll
+++ b/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-2.ll
@@ -18,24 +18,24 @@ define i32 @boo2(i32 signext %argc) {
 entry:
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  store i32 %argc, i32* %argc.addr, align 4
+  store i32 0, ptr %retval, align 4
+  store i32 %argc, ptr %argc.addr, align 4
   call void asm sideeffect "test_label_2:", "~{$1}"()
-  %0 = load i32, i32* %argc.addr, align 4
+  %0 = load i32, ptr %argc.addr, align 4
   %cmp = icmp sgt i32 %0, 1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
   call void asm sideeffect ".space 268435052", "~{$1}"()
-  %call = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
-  store i32 %call, i32* %retval, align 4
+  %call = call i32 @foo()
+  store i32 %call, ptr %retval, align 4
   br label %return
 
 if.end:
-  store i32 0, i32* %retval, align 4
+  store i32 0, ptr %retval, align 4
   br label %return
 
 return:
-  %1 = load i32, i32* %retval, align 4
+  %1 = load i32, ptr %retval, align 4
   ret i32 %1
 }

diff --git a/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-3.ll b/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-3.ll
index 1fa78942af419..ac5a0c99192ae 100644
--- a/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-3.ll
+++ b/llvm/test/CodeGen/Mips/longbranch/long-branch-expansion-3.ll
@@ -52,24 +52,24 @@ define i32 @boo3(i32 signext %argc) {
 entry:
   %retval = alloca i32, align 4
   %argc.addr = alloca i32, align 4
-  store i32 0, i32* %retval, align 4
-  store i32 %argc, i32* %argc.addr, align 4
+  store i32 0, ptr %retval, align 4
+  store i32 %argc, ptr %argc.addr, align 4
   call void asm sideeffect "test_label_3:", "~{$1}"()
-  %0 = load i32, i32* %argc.addr, align 4
+  %0 = load i32, ptr %argc.addr, align 4
   %cmp = icmp sgt i32 %0, 1
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
   call void asm sideeffect ".space 268435452", "~{$1}"()
-  %call = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
-  store i32 %call, i32* %retval, align 4
+  %call = call i32 @foo()
+  store i32 %call, ptr %retval, align 4
   br label %return
 
 if.end:
-  store i32 0, i32* %retval, align 4
+  store i32 0, ptr %retval, align 4
   br label %return
 
 return:
-  %1 = load i32, i32* %retval, align 4
+  %1 = load i32, ptr %retval, align 4
   ret i32 %1
 }

diff --git a/llvm/test/CodeGen/Mips/lw16-base-reg.ll b/llvm/test/CodeGen/Mips/lw16-base-reg.ll
index 9eeb5d2113535..f43e02e8d0616 100644
--- a/llvm/test/CodeGen/Mips/lw16-base-reg.ll
+++ b/llvm/test/CodeGen/Mips/lw16-base-reg.ll
@@ -8,16 +8,14 @@
 
 $_ZN1TaSERKS_ = comdat any
 
-define linkonce_odr void @_ZN1TaSERKS_(%struct.T* %this, %struct.T* dereferenceable(4) %t) #0 comdat align 2 {
+define linkonce_odr void @_ZN1TaSERKS_(ptr %this, ptr dereferenceable(4) %t) #0 comdat align 2 {
 entry:
-  %this.addr = alloca %struct.T*, align 4
-  %t.addr = alloca %struct.T*, align 4
-  %this1 = load %struct.T*, %struct.T** %this.addr, align 4
-  %0 = load %struct.T*, %struct.T** %t.addr, align 4
-  %V3 = getelementptr inbounds %struct.T, %struct.T* %0, i32 0, i32 0
-  %1 = load i32, i32* %V3, align 4
-  %V4 = getelementptr inbounds %struct.T, %struct.T* %this1, i32 0, i32 0
-  store i32 %1, i32* %V4, align 4
+  %this.addr = alloca ptr, align 4
+  %t.addr = alloca ptr, align 4
+  %this1 = load ptr, ptr %this.addr, align 4
+  %0 = load ptr, ptr %t.addr, align 4
+  %1 = load i32, ptr %0, align 4
+  store i32 %1, ptr %this1, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/machineverifier.ll b/llvm/test/CodeGen/Mips/machineverifier.ll
index d496b833a6c6b..39d2a7e4a9360 100644
--- a/llvm/test/CodeGen/Mips/machineverifier.ll
+++ b/llvm/test/CodeGen/Mips/machineverifier.ll
@@ -6,13 +6,13 @@
 
 define void @foo() nounwind {
 entry:
-  %0 = load i32, i32* @g, align 4
+  %0 = load i32, ptr @g, align 4
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
   %add = add nsw i32 %0, 10
-  store i32 %add, i32* @g, align 4
+  store i32 %add, ptr @g, align 4
   br label %if.end
 
 if.end:                                           ; preds = %entry, %if.then

diff --git a/llvm/test/CodeGen/Mips/mbrsize4a.ll b/llvm/test/CodeGen/Mips/mbrsize4a.ll
index 53da433fbcb50..7a08d169527de 100644
--- a/llvm/test/CodeGen/Mips/mbrsize4a.ll
+++ b/llvm/test/CodeGen/Mips/mbrsize4a.ll
@@ -8,27 +8,27 @@
 define i32 @main() #0 {
 entry:
   %retval = alloca i32, align 4
-  store i32 0, i32* %retval
+  store i32 0, ptr %retval
   br label %z
 
 z:                                                ; preds = %y, %entry
-  %call = call i32 bitcast (i32 (...)* @foo to i32 ()*)()
+  %call = call i32 @foo()
   call void asm sideeffect ".space 10000000", ""() #2, !srcloc !1
   br label %y
 
 y:                                                ; preds = %z
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0))
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str)
   br label %z
 
 return:                                           ; No predecessors!
-  %0 = load i32, i32* %retval
+  %0 = load i32, ptr %retval
   ret i32 %0
 ; jal16: 	jal	$BB{{[0-9]+}}_{{[0-9]+}}
 }
 
 declare i32 @foo(...) #1
 
-declare i32 @printf(i8*, ...) #1
+declare i32 @printf(ptr, ...) #1
 
 attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }

diff --git a/llvm/test/CodeGen/Mips/memcpy.ll b/llvm/test/CodeGen/Mips/memcpy.ll
index 0feb1fc5862bf..554452457166d 100644
--- a/llvm/test/CodeGen/Mips/memcpy.ll
+++ b/llvm/test/CodeGen/Mips/memcpy.ll
@@ -4,16 +4,16 @@
 
 @.str = private unnamed_addr constant [31 x i8] c"abcdefghijklmnopqrstuvwxyzABCD\00", align 1
 
-define void @foo1(%struct.S1* %s1, i8 signext %n) nounwind {
+define void @foo1(ptr %s1, i8 signext %n) nounwind {
 entry:
 ; CHECK-NOT: call16(memcpy
 
-  %arraydecay = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 0
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %arraydecay, i8* align 1 getelementptr inbounds ([31 x i8], [31 x i8]* @.str, i32 0, i32 0), i32 31, i1 false)
-  %arrayidx = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 40
-  store i8 %n, i8* %arrayidx, align 1
+  %arraydecay = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 1, i32 0
+  tail call void @llvm.memcpy.p0.p0.i32(ptr align 1 %arraydecay, ptr align 1 @.str, i32 31, i1 false)
+  %arrayidx = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 1, i32 40
+  store i8 %n, ptr %arrayidx, align 1
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
 

diff --git a/llvm/test/CodeGen/Mips/micromips-addiu.ll b/llvm/test/CodeGen/Mips/micromips-addiu.ll
index 84ebc4349e1fa..5165994126a2a 100644
--- a/llvm/test/CodeGen/Mips/micromips-addiu.ll
+++ b/llvm/test/CodeGen/Mips/micromips-addiu.ll
@@ -8,24 +8,21 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @x, align 4
+  %0 = load i32, ptr @x, align 4
   %addiu1 = add i32 %0, -7
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
-                                  ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %addiu1)
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %addiu1)
 
-  %1 = load i32, i32* @y, align 4
+  %1 = load i32, ptr @y, align 4
   %addiu2 = add i32 %1, 55
-  %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
-                                  ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %addiu2)
+  %call2 = call i32 (ptr, ...) @printf(ptr @.str, i32 %addiu2)
 
-  %2 = load i32, i32* @z, align 4
+  %2 = load i32, ptr @z, align 4
   %addiu3 = add i32 %2, 24
-  %call3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
-                                  ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %addiu3)
+  %call3 = call i32 (ptr, ...) @printf(ptr @.str, i32 %addiu3)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 
 ; CHECK: addius5  ${{[0-9]+}}, -7
 ; CHECK: addiu    ${{[0-9]+}}, ${{[0-9]+}}, 55

diff --git a/llvm/test/CodeGen/Mips/micromips-addu16.ll b/llvm/test/CodeGen/Mips/micromips-addu16.ll
index 3ecdf2488d2c1..04243fc899f86 100644
--- a/llvm/test/CodeGen/Mips/micromips-addu16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-addu16.ll
@@ -7,11 +7,11 @@ entry:
   %a = alloca i32, align 4
   %b = alloca i32, align 4
   %c = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = load i32, i32* %b, align 4
-  %1 = load i32, i32* %c, align 4
+  store i32 0, ptr %retval
+  %0 = load i32, ptr %b, align 4
+  %1 = load i32, ptr %c, align 4
   %add = add nsw i32 %0, %1
-  store i32 %add, i32* %a, align 4
+  store i32 %add, ptr %a, align 4
   ret i32 0
 }
 

diff --git a/llvm/test/CodeGen/Mips/micromips-and16.ll b/llvm/test/CodeGen/Mips/micromips-and16.ll
index d0a16ac28a091..66cfb0807f9d9 100644
--- a/llvm/test/CodeGen/Mips/micromips-and16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-and16.ll
@@ -7,11 +7,11 @@ entry:
   %a = alloca i32, align 4
   %b = alloca i32, align 4
   %c = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = load i32, i32* %b, align 4
-  %1 = load i32, i32* %c, align 4
+  store i32 0, ptr %retval
+  %0 = load i32, ptr %b, align 4
+  %1 = load i32, ptr %c, align 4
   %and = and i32 %0, %1
-  store i32 %and, i32* %a, align 4
+  store i32 %and, ptr %a, align 4
   ret i32 0
 }
 

diff --git a/llvm/test/CodeGen/Mips/micromips-andi.ll b/llvm/test/CodeGen/Mips/micromips-andi.ll
index cd7a794cd1beb..c84a99fbef415 100644
--- a/llvm/test/CodeGen/Mips/micromips-andi.ll
+++ b/llvm/test/CodeGen/Mips/micromips-andi.ll
@@ -7,19 +7,17 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @x, align 4
+  %0 = load i32, ptr @x, align 4
   %and1 = and i32 %0, 4
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
-                                  ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and1)
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %and1)
 
-  %1 = load i32, i32* @y, align 4
+  %1 = load i32, ptr @y, align 4
   %and2 = and i32 %1, 5
-  %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds
-                                  ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %and2)
+  %call2 = call i32 (ptr, ...) @printf(ptr @.str, i32 %and2)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 
 ; CHECK: andi16 ${{[2-7]|16|17}}, ${{[2-7]|16|17}}
 ; CHECK: andi   ${{[0-9]+}}, ${{[0-9]+}}

diff --git a/llvm/test/CodeGen/Mips/micromips-atomic.ll b/llvm/test/CodeGen/Mips/micromips-atomic.ll
index e1e597635e74e..9cb277442d425 100644
--- a/llvm/test/CodeGen/Mips/micromips-atomic.ll
+++ b/llvm/test/CodeGen/Mips/micromips-atomic.ll
@@ -20,6 +20,6 @@ define i32 @AtomicLoadAdd32(i32 %incr) nounwind {
 ; CHECK-NEXT:  # %bb.2: # %entry
 ; CHECK-NEXT:    jrc $ra
 entry:
-  %0 = atomicrmw add i32* @x, i32 %incr monotonic
+  %0 = atomicrmw add ptr @x, i32 %incr monotonic
   ret i32 %0
 }

diff --git a/llvm/test/CodeGen/Mips/micromips-atomic1.ll b/llvm/test/CodeGen/Mips/micromips-atomic1.ll
index 265acf60ae0fa..74e4f62662017 100644
--- a/llvm/test/CodeGen/Mips/micromips-atomic1.ll
+++ b/llvm/test/CodeGen/Mips/micromips-atomic1.ll
@@ -10,7 +10,7 @@
 
 define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
 entry:
-  %0 = atomicrmw add i8* @y, i8 %incr monotonic
+  %0 = atomicrmw add ptr @y, i8 %incr monotonic
   ret i8 %0
 
 ; MICROMIPS:     ll      ${{[0-9]+}}, 0(${{[0-9]+}})
@@ -19,7 +19,7 @@ entry:
 
 define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
 entry:
-  %pair0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic
+  %pair0 = cmpxchg ptr @y, i8 %oldval, i8 %newval monotonic monotonic
   %0 = extractvalue { i8, i1 } %pair0, 0
   ret i8 %0
 

diff --git a/llvm/test/CodeGen/Mips/micromips-b-range.ll b/llvm/test/CodeGen/Mips/micromips-b-range.ll
index 37a521566bd69..064afff3da0eb 100644
--- a/llvm/test/CodeGen/Mips/micromips-b-range.ll
+++ b/llvm/test/CodeGen/Mips/micromips-b-range.ll
@@ -61,7 +61,7 @@
 @x = external global i32, align 4
 
 define void @foo() {
-  %1 = load i32, i32* @x, align 4
+  %1 = load i32, ptr @x, align 4
   %2 = icmp sgt i32 %1, 0
   br i1 %2, label %la, label %lf
 

diff --git a/llvm/test/CodeGen/Mips/micromips-compact-branches.ll b/llvm/test/CodeGen/Mips/micromips-compact-branches.ll
index 332cd8cd105c0..3b05e9e174f35 100644
--- a/llvm/test/CodeGen/Mips/micromips-compact-branches.ll
+++ b/llvm/test/CodeGen/Mips/micromips-compact-branches.ll
@@ -4,12 +4,12 @@
 define void @main() nounwind uwtable {
 entry:
   %x = alloca i32, align 4
-  %0 = load i32, i32* %x, align 4
+  %0 = load i32, ptr %x, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end, !prof !1
 
 if.then:
-  store i32 10, i32* %x, align 4
+  store i32 10, ptr %x, align 4
   br label %if.end
 
 if.end:

diff --git a/llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll b/llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll
index e8327a3b3faf6..7d116211d7210 100644
--- a/llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll
+++ b/llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=mipsel -mcpu=mips32r2 -mattr=+micromips \
 ; RUN:   -relocation-model=static -O2 < %s | FileCheck %s
 
-@main.L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@main, %L1), i8* blockaddress(@main, %L2), i8* null], align 4
+@main.L = internal unnamed_addr constant [3 x ptr] [ptr blockaddress(@main, %L1), ptr blockaddress(@main, %L2), ptr null], align 4
 @str = private unnamed_addr constant [2 x i8] c"A\00"
 @str2 = private unnamed_addr constant [2 x i8] c"B\00"
 
@@ -11,18 +11,18 @@ entry:
 
 L1:                                               ; preds = %entry, %L1
   %i.0 = phi i32 [ 0, %entry ], [ %inc, %L1 ]
-  %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str, i32 0, i32 0))
+  %puts = tail call i32 @puts(ptr @str)
   %inc = add i32 %i.0, 1
-  %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @main.L, i32 0, i32 %i.0
-  %0 = load i8*, i8** %arrayidx, align 4, !tbaa !1
-  indirectbr i8* %0, [label %L1, label %L2]
+  %arrayidx = getelementptr inbounds [3 x ptr], ptr @main.L, i32 0, i32 %i.0
+  %0 = load ptr, ptr %arrayidx, align 4, !tbaa !1
+  indirectbr ptr %0, [label %L1, label %L2]
 
 L2:                                               ; preds = %L1
-  %puts2 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str2, i32 0, i32 0))
+  %puts2 = tail call i32 @puts(ptr @str2)
   ret i32 0
 }
 
-declare i32 @puts(i8* nocapture readonly) #1
+declare i32 @puts(ptr nocapture readonly) #1
 
 !1 = !{!2, !2, i64 0}
 !2 = !{!"any pointer", !3, i64 0}
@@ -34,11 +34,11 @@ declare i32 @puts(i8* nocapture readonly) #1
 %struct.foostruct = type { [3 x float] }
 %struct.barstruct = type { %struct.foostruct, float }
 @bar_ary = common global [4 x %struct.barstruct] zeroinitializer, align 4
-define float* @spooky(i32 signext %i) #0 {
+define ptr @spooky(i32 signext %i) #0 {
 
-  %safe = getelementptr inbounds [4 x %struct.barstruct], [4 x %struct.barstruct]* @bar_ary, i32 0, i32 %i, i32 1
-  store float 1.420000e+02, float* %safe, align 4, !tbaa !1
-  ret float* %safe
+  %safe = getelementptr inbounds [4 x %struct.barstruct], ptr @bar_ary, i32 0, i32 %i, i32 1
+  store float 1.420000e+02, ptr %safe, align 4, !tbaa !1
+  ret ptr %safe
 }
 
 ; CHECK:      spooky:

diff --git a/llvm/test/CodeGen/Mips/micromips-delay-slot.ll b/llvm/test/CodeGen/Mips/micromips-delay-slot.ll
index b0922992ff548..c21caf1405f63 100644
--- a/llvm/test/CodeGen/Mips/micromips-delay-slot.ll
+++ b/llvm/test/CodeGen/Mips/micromips-delay-slot.ll
@@ -7,8 +7,8 @@
 define i32 @foo(i32 signext %a) #0 {
 entry:
   %a.addr = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %0 = load i32, i32* %a.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  %0 = load i32, ptr %a.addr, align 4
   %shl = shl i32 %0, 2
   %call = call i32 @bar(i32 signext %shl)
   ret i32 %call

diff --git a/llvm/test/CodeGen/Mips/micromips-gcc-except-table.ll b/llvm/test/CodeGen/Mips/micromips-gcc-except-table.ll
index 38a76927e2a8a..2b63aff01574e 100644
--- a/llvm/test/CodeGen/Mips/micromips-gcc-except-table.ll
+++ b/llvm/test/CodeGen/Mips/micromips-gcc-except-table.ll
@@ -4,34 +4,33 @@
 ; CHECK-NEXT: 0000 ff9b1501 0c011100 00110e1f 011f1800
 ; CHECK-NEXT: 0010 00010000 00000000
 
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
 
-define dso_local i32 @main() local_unnamed_addr norecurse personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define dso_local i32 @main() local_unnamed_addr norecurse personality ptr @__gxx_personality_v0 {
 entry:
-  %exception.i = tail call i8* @__cxa_allocate_exception(i32 4) nounwind
-  %0 = bitcast i8* %exception.i to i32*
-  store i32 5, i32* %0, align 16
-  invoke void @__cxa_throw(i8* %exception.i, i8* bitcast (i8** @_ZTIi to i8*), i8* null) noreturn
+  %exception.i = tail call ptr @__cxa_allocate_exception(i32 4) nounwind
+  store i32 5, ptr %exception.i, align 16
+  invoke void @__cxa_throw(ptr %exception.i, ptr @_ZTIi, ptr null) noreturn
           to label %.noexc unwind label %return
 
 .noexc:
   unreachable
 
 return:
-  %1 = landingpad { i8*, i32 }
-          catch i8* null
-  %2 = extractvalue { i8*, i32 } %1, 0
-  %3 = tail call i8* @__cxa_begin_catch(i8* %2) nounwind
+  %0 = landingpad { ptr, i32 }
+          catch ptr null
+  %1 = extractvalue { ptr, i32 } %0, 0
+  %2 = tail call ptr @__cxa_begin_catch(ptr %1) nounwind
   tail call void @__cxa_end_catch()
   ret i32 0
 }
 
 declare i32 @__gxx_personality_v0(...)
 
-declare i8* @__cxa_begin_catch(i8*) local_unnamed_addr
+declare ptr @__cxa_begin_catch(ptr) local_unnamed_addr
 
 declare void @__cxa_end_catch() local_unnamed_addr
 
-declare i8* @__cxa_allocate_exception(i32) local_unnamed_addr
+declare ptr @__cxa_allocate_exception(i32) local_unnamed_addr
 
-declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
+declare void @__cxa_throw(ptr, ptr, ptr) local_unnamed_addr

diff --git a/llvm/test/CodeGen/Mips/micromips-gp-rc.ll b/llvm/test/CodeGen/Mips/micromips-gp-rc.ll
index f139f7a8486da..42f65463cf1cd 100644
--- a/llvm/test/CodeGen/Mips/micromips-gp-rc.ll
+++ b/llvm/test/CodeGen/Mips/micromips-gp-rc.ll
@@ -6,7 +6,7 @@
 ; Function Attrs: noreturn nounwind
 define void @foo() #0 {
 entry:
-  %0 = load i32, i32* @g, align 4
+  %0 = load i32, ptr @g, align 4
   tail call void @exit(i32 signext %0)
   unreachable
 }

diff --git a/llvm/test/CodeGen/Mips/micromips-jal.ll b/llvm/test/CodeGen/Mips/micromips-jal.ll
index 3f5f91a34f853..27a75a7cb531c 100644
--- a/llvm/test/CodeGen/Mips/micromips-jal.ll
+++ b/llvm/test/CodeGen/Mips/micromips-jal.ll
@@ -5,10 +5,10 @@ define i32 @sum(i32 %a, i32 %b) nounwind uwtable {
 entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
-  %0 = load i32, i32* %a.addr, align 4
-  %1 = load i32, i32* %b.addr, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
+  %0 = load i32, ptr %a.addr, align 4
+  %1 = load i32, ptr %b.addr, align 4
   %add = add nsw i32 %0, %1
   ret i32 %add
 }
@@ -19,12 +19,12 @@ entry:
   %x = alloca i32, align 4
   %y = alloca i32, align 4
   %z = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = load i32, i32* %y, align 4
-  %1 = load i32, i32* %z, align 4
+  store i32 0, ptr %retval
+  %0 = load i32, ptr %y, align 4
+  %1 = load i32, ptr %z, align 4
   %call = call i32 @sum(i32 %0, i32 %1)
-  store i32 %call, i32* %x, align 4
-  %2 = load i32, i32* %x, align 4
+  store i32 %call, ptr %x, align 4
+  %2 = load i32, ptr %x, align 4
   ret i32 %2
 }
 

diff  --git a/llvm/test/CodeGen/Mips/micromips-li.ll b/llvm/test/CodeGen/Mips/micromips-li.ll
index 997f4e9196afa..bfac23c25ea5b 100644
--- a/llvm/test/CodeGen/Mips/micromips-li.ll
+++ b/llvm/test/CodeGen/Mips/micromips-li.ll
@@ -7,9 +7,9 @@
 
 define i32 @main() nounwind {
 entry:
-  store i32 1, i32* @x, align 4
-  store i32 2148, i32* @y, align 4
-  store i32 33332, i32* @z, align 4
+  store i32 1, ptr @x, align 4
+  store i32 2148, ptr @y, align 4
+  store i32 33332, ptr @z, align 4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Mips/micromips-load-effective-address.ll b/llvm/test/CodeGen/Mips/micromips-load-effective-address.ll
index 4704580982125..5215c3a519989 100644
--- a/llvm/test/CodeGen/Mips/micromips-load-effective-address.ll
+++ b/llvm/test/CodeGen/Mips/micromips-load-effective-address.ll
@@ -1,16 +1,16 @@
 ; RUN: llc %s -march=mipsel -mattr=micromips -filetype=asm \
 ; RUN: -relocation-model=pic -O3 -o - | FileCheck %s
 
-define i32 @sum(i32* %x, i32* %y) nounwind uwtable {
+define i32 @sum(ptr %x, ptr %y) nounwind uwtable {
 entry:
-  %x.addr = alloca i32*, align 8
-  %y.addr = alloca i32*, align 8
-  store i32* %x, i32** %x.addr, align 8
-  store i32* %y, i32** %y.addr, align 8
-  %0 = load i32*, i32** %x.addr, align 8
-  %1 = load i32, i32* %0, align 4
-  %2 = load i32*, i32** %y.addr, align 8
-  %3 = load i32, i32* %2, align 4
+  %x.addr = alloca ptr, align 8
+  %y.addr = alloca ptr, align 8
+  store ptr %x, ptr %x.addr, align 8
+  store ptr %y, ptr %y.addr, align 8
+  %0 = load ptr, ptr %x.addr, align 8
+  %1 = load i32, ptr %0, align 4
+  %2 = load ptr, ptr %y.addr, align 8
+  %3 = load i32, ptr %2, align 4
   %add = add nsw i32 %1, %3
   ret i32 %add
 }
@@ -20,8 +20,8 @@ entry:
   %retval = alloca i32, align 4
   %x = alloca i32, align 4
   %y = alloca i32, align 4
-  store i32 0, i32* %retval
-  %call = call i32 @sum(i32* %x, i32* %y)
+  store i32 0, ptr %retval
+  %call = call i32 @sum(ptr %x, ptr %y)
   ret i32 %call
 }
 

diff  --git a/llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll b/llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll
index 787719338a39c..29d46bed4226d 100644
--- a/llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll
+++ b/llvm/test/CodeGen/Mips/micromips-lwc1-swc1.ll
@@ -16,7 +16,7 @@ entry:
 ; MM32:      lw      $[[R3:[0-9]+]], %got(gf0)($[[R2]])
 ; MM32:      lwc1    $f0, 0($[[R3]])
 
-  %0 = load float, float* @gf0, align 4
+  %0 = load float, ptr @gf0, align 4
   ret float %0
 }
 
@@ -29,7 +29,7 @@ entry:
 ; MM32:      lw      $[[R3:[0-9]+]], %got(gf0)($[[R2]])
 ; MM32:      swc1    $f12, 0($[[R3]])
 
-  store float %a, float* @gf0, align 4
+  store float %a, ptr @gf0, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/micromips-not16.ll b/llvm/test/CodeGen/Mips/micromips-not16.ll
index d31aefae6f0d9..2def472fce40a 100644
--- a/llvm/test/CodeGen/Mips/micromips-not16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-not16.ll
@@ -5,21 +5,21 @@ define i32 @main() {
 entry:
   %retval = alloca i32, align 4
   %x = alloca i64, align 8
-  store i32 0, i32* %retval
-  %0 = load i64, i64* %x, align 8
+  store i32 0, ptr %retval
+  %0 = load i64, ptr %x, align 8
   %cmp = icmp ne i64 %0, 9223372036854775807
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 1, i32* %retval
+  store i32 1, ptr %retval
   br label %return
 
 if.end:
-  store i32 0, i32* %retval
+  store i32 0, ptr %retval
   br label %return
 
 return:
-  %1 = load i32, i32* %retval
+  %1 = load i32, ptr %retval
   ret i32 %1
 }
 

diff  --git a/llvm/test/CodeGen/Mips/micromips-rdhwr-directives.ll b/llvm/test/CodeGen/Mips/micromips-rdhwr-directives.ll
index 837f799b40fa1..fe786b82df23f 100644
--- a/llvm/test/CodeGen/Mips/micromips-rdhwr-directives.ll
+++ b/llvm/test/CodeGen/Mips/micromips-rdhwr-directives.ll
@@ -10,6 +10,6 @@ entry:
 ; CHECK: rdhwr
 ; CHECK: .set  pop
 
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   ret i32 %0
 }

diff  --git a/llvm/test/CodeGen/Mips/micromips-shift.ll b/llvm/test/CodeGen/Mips/micromips-shift.ll
index a4f8ffe9408d2..11472b8ac99fa 100644
--- a/llvm/test/CodeGen/Mips/micromips-shift.ll
+++ b/llvm/test/CodeGen/Mips/micromips-shift.ll
@@ -10,13 +10,13 @@
 
 define i32 @shift_left() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %shl = shl i32 %0, 4
-  store i32 %shl, i32* @b, align 4
+  store i32 %shl, ptr @b, align 4
 
-  %1 = load i32, i32* @c, align 4
+  %1 = load i32, ptr @c, align 4
   %shl1 = shl i32 %1, 10
-  store i32 %shl1, i32* @d, align 4
+  store i32 %shl1, ptr @d, align 4
 
   ret i32 0
 }
@@ -31,13 +31,13 @@ entry:
 
 define i32 @shift_right() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %shr = lshr i32 %0, 4
-  store i32 %shr, i32* @j, align 4
+  store i32 %shr, ptr @j, align 4
 
-  %1 = load i32, i32* @m, align 4
+  %1 = load i32, ptr @m, align 4
   %shr1 = lshr i32 %1, 10
-  store i32 %shr1, i32* @n, align 4
+  store i32 %shr1, ptr @n, align 4
 
   ret i32 0
 }

diff  --git a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-addiur1sp-addiusp.ll b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-addiur1sp-addiusp.ll
index c7e96da379aa3..8949b19f4ada0 100644
--- a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-addiur1sp-addiusp.ll
+++ b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-addiur1sp-addiusp.ll
@@ -7,11 +7,10 @@ entry:
 ; CHECK: addiur1sp
 ; CHECK: addiusp
   %a = alloca [10 x i32], align 4
-  %index = getelementptr inbounds [10 x i32], [10 x i32]* %a, i32 0, i32 0
-  call void @init(i32* %index)
-  %0 = load i32, i32* %index, align 4
+  call void @init(ptr %a)
+  %0 = load i32, ptr %a, align 4
   ret i32 %0
 }
 
-declare void @init(i32*)
+declare void @init(ptr)
 

diff  --git a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lbu16-lhu16-sb16-sh16.ll b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lbu16-lhu16-sb16-sh16.ll
index 804ea1e5c4388..f8f0955980cce 100644
--- a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lbu16-lhu16-sb16-sh16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lbu16-lhu16-sb16-sh16.ll
@@ -1,37 +1,37 @@
 ; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips -verify-machineinstrs < %s | FileCheck %s
 
-define void @f1(i8* %p) {
+define void @f1(ptr %p) {
 entry:
 ; CHECK-LABEL: f1:
 ; CHECK: lbu16
 ; CHECK: sb16
-  %0 = load i8, i8* %p, align 4
+  %0 = load i8, ptr %p, align 4
   %a = zext i8 %0 to i32
   %and = and i32 %a, 1
   %cmp = icmp eq i32 %and, 0
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i8 0, i8* %p, align 1
+  store i8 0, ptr %p, align 1
   br label %if.end
 
 if.end:
   ret void
 }
 
-define void @f2(i16* %p) {
+define void @f2(ptr %p) {
 entry:
 ; CHECK-LABEL: f2:
 ; CHECK: lhu16
 ; CHECK: sh16
-  %0 = load i16, i16* %p, align 2
+  %0 = load i16, ptr %p, align 2
   %a = zext i16 %0 to i32
   %and = and i32 %a, 2
   %cmp = icmp eq i32 %and, 0
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i16 0, i16* %p, align 2
+  store i16 0, ptr %p, align 2
   br label %if.end
 
 if.end:

diff  --git a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.ll b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.ll
index 9481f32e17f1a..ee289a2d689f1 100644
--- a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.ll
+++ b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.ll
@@ -3,7 +3,7 @@
 ; RUN: -verify-machineinstrs < %s | FileCheck %s
 
 ; Function Attrs: nounwind
-define i32 @fun(i32* %adr, i32 %val) {
+define i32 @fun(ptr %adr, i32 %val) {
 ; CHECK-LABEL: fun:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addiusp -32
@@ -24,10 +24,10 @@ define i32 @fun(i32* %adr, i32 %val) {
 ; CHECK-NEXT:    addiusp 32
 ; CHECK-NEXT:    jrc $ra
 entry:
-  %call1 =  call i32* @fun1()
-  store i32 %val, i32* %adr, align 4
+  %call1 =  call ptr @fun1()
+  store i32 %val, ptr %adr, align 4
   ret i32 0
 }
 
-declare i32* @fun1()
+declare ptr @fun1()
 

diff  --git a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwsp-swsp.ll b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwsp-swsp.ll
index b92554854c046..46a963e2373f3 100644
--- a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwsp-swsp.ll
+++ b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwsp-swsp.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -march=mipsel -mcpu=mips32r2 -mattr=+micromips -asm-show-inst -verify-machineinstrs < %s | FileCheck %s
 
 ; Function Attrs: nounwind
-define i32 @function1(i32 (i32)* %f) {
+define i32 @function1(ptr %f) {
 entry:
 ; CHECK-LABEL: function1:
 ; CHECK: SWSP_MM

diff  --git a/llvm/test/CodeGen/Mips/micromips-subu16.ll b/llvm/test/CodeGen/Mips/micromips-subu16.ll
index d415574f443ad..874f6dfbcbfa3 100644
--- a/llvm/test/CodeGen/Mips/micromips-subu16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-subu16.ll
@@ -7,11 +7,11 @@ entry:
   %a = alloca i32, align 4
   %b = alloca i32, align 4
   %c = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = load i32, i32* %b, align 4
-  %1 = load i32, i32* %c, align 4
+  store i32 0, ptr %retval
+  %0 = load i32, ptr %b, align 4
+  %1 = load i32, ptr %c, align 4
   %sub = sub nsw i32 %0, %1
-  store i32 %sub, i32* %a, align 4
+  store i32 %sub, ptr %a, align 4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll b/llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll
index 358372649b5db..c11c817b41651 100644
--- a/llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll
@@ -2,22 +2,22 @@
 ; RUN: -relocation-model=pic -O3 -o - | FileCheck %s
 
 ; Function Attrs: noinline nounwind
-define void @bar(i32* %p) #0 {
+define void @bar(ptr %p) #0 {
 entry:
-  %p.addr = alloca i32*, align 4
-  store i32* %p, i32** %p.addr, align 4
-  %0 = load i32*, i32** %p.addr, align 4
-  %1 = load i32, i32* %0, align 4
+  %p.addr = alloca ptr, align 4
+  store ptr %p, ptr %p.addr, align 4
+  %0 = load ptr, ptr %p.addr, align 4
+  %1 = load i32, ptr %0, align 4
   %add = add nsw i32 7, %1
-  %2 = load i32*, i32** %p.addr, align 4
-  store i32 %add, i32* %2, align 4
-  %3 = load i32*, i32** %p.addr, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %3, i32 1
-  %4 = load i32, i32* %add.ptr, align 4
+  %2 = load ptr, ptr %p.addr, align 4
+  store i32 %add, ptr %2, align 4
+  %3 = load ptr, ptr %p.addr, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %3, i32 1
+  %4 = load i32, ptr %add.ptr, align 4
   %add1 = add nsw i32 7, %4
-  %5 = load i32*, i32** %p.addr, align 4
-  %add.ptr2 = getelementptr inbounds i32, i32* %5, i32 1
-  store i32 %add1, i32* %add.ptr2, align 4
+  %5 = load ptr, ptr %p.addr, align 4
+  %add.ptr2 = getelementptr inbounds i32, ptr %5, i32 1
+  store i32 %add1, ptr %add.ptr2, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/micromips-sw.ll b/llvm/test/CodeGen/Mips/micromips-sw.ll
index 00c0141db18eb..989b6b6f2be9b 100644
--- a/llvm/test/CodeGen/Mips/micromips-sw.ll
+++ b/llvm/test/CodeGen/Mips/micromips-sw.ll
@@ -13,8 +13,8 @@ define void @fun(i32 %val) {
 ; MM6-LABEL: <fun>:
 ; MM6:         fb fd 00 14 sw $ra, 20($sp)
 entry:
-  call i32* @fun1()
+  call ptr @fun1()
   ret void
 }
 
-declare i32* @fun1()
+declare ptr @fun1()

diff  --git a/llvm/test/CodeGen/Mips/micromips-target-external-symbol-reloc.ll b/llvm/test/CodeGen/Mips/micromips-target-external-symbol-reloc.ll
index 7ea689ef99f4a..7ec28cc7276db 100644
--- a/llvm/test/CodeGen/Mips/micromips-target-external-symbol-reloc.ll
+++ b/llvm/test/CodeGen/Mips/micromips-target-external-symbol-reloc.ll
@@ -11,13 +11,12 @@
 ; MM6: JAL_MMR6 &memset
 ; MM6-NOT: JALRC16_MMR6
 
-define dso_local void @foo(i32* nocapture %ar) local_unnamed_addr {
+define dso_local void @foo(ptr nocapture %ar) local_unnamed_addr {
 entry:
   call void @bar()
-  %0 = bitcast i32* %ar to i8*
-  tail call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 100, i1 false)
+  tail call void @llvm.memset.p0.i32(ptr align 4 %ar, i8 0, i32 100, i1 false)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1)
 declare void @bar()

diff  --git a/llvm/test/CodeGen/Mips/micromips-xor16.ll b/llvm/test/CodeGen/Mips/micromips-xor16.ll
index 53c75acd4d3b9..a09531bc706cb 100644
--- a/llvm/test/CodeGen/Mips/micromips-xor16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-xor16.ll
@@ -7,11 +7,11 @@ entry:
   %a = alloca i32, align 4
   %b = alloca i32, align 4
   %c = alloca i32, align 4
-  store i32 0, i32* %retval
-  %0 = load i32, i32* %b, align 4
-  %1 = load i32, i32* %c, align 4
+  store i32 0, ptr %retval
+  %0 = load i32, ptr %b, align 4
+  %1 = load i32, ptr %c, align 4
   %xor = xor i32 %0, %1
-  store i32 %xor, i32* %a, align 4
+  store i32 %xor, ptr %a, align 4
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Mips/mips1-load-delay.ll b/llvm/test/CodeGen/Mips/mips1-load-delay.ll
index 516ecf70e4dec..c1bc264152fcc 100644
--- a/llvm/test/CodeGen/Mips/mips1-load-delay.ll
+++ b/llvm/test/CodeGen/Mips/mips1-load-delay.ll
@@ -5,22 +5,22 @@ target datalayout = "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
 target triple = "mipsel-unknown-unknown-elf"
 
 ; Function Attrs: noinline nounwind optnone
-define dso_local i32 @add_two_pointers(i32* %a, i32* %b) #0 {
+define dso_local i32 @add_two_pointers(ptr %a, ptr %b) #0 {
 entry:
 ; ALL-LABEL: add_two_pointers:
-  %a.addr = alloca i32*, align 4
-  %b.addr = alloca i32*, align 4
-  store i32* %a, i32** %a.addr, align 4
-  store i32* %b, i32** %b.addr, align 4
-  %0 = load i32*, i32** %a.addr, align 4
-  %1 = load i32, i32* %0, align 4
+  %a.addr = alloca ptr, align 4
+  %b.addr = alloca ptr, align 4
+  store ptr %a, ptr %a.addr, align 4
+  store ptr %b, ptr %b.addr, align 4
+  %0 = load ptr, ptr %a.addr, align 4
+  %1 = load i32, ptr %0, align 4
   ; ALL:        lw $1, 4($fp)
   ; MIPS1:      nop
   ; MIPS2-NOT:  nop
   ; MIPS32-NOT: nop
   ; ALL:        lw $1, 0($1)
-  %2 = load i32*, i32** %b.addr, align 4
-  %3 = load i32, i32* %2, align 4
+  %2 = load ptr, ptr %b.addr, align 4
+  %3 = load i32, ptr %2, align 4
   ; ALL:        lw $2, 0($fp)
   ; MIPS1:      nop
   ; MIPS2-NOT:  nop

diff  --git a/llvm/test/CodeGen/Mips/mips16_32_8.ll b/llvm/test/CodeGen/Mips/mips16_32_8.ll
index 9ec2788a929a7..5c0cd323615a7 100644
--- a/llvm/test/CodeGen/Mips/mips16_32_8.ll
+++ b/llvm/test/CodeGen/Mips/mips16_32_8.ll
@@ -10,7 +10,7 @@
 
 define void @foo() #0 {
 entry:
-  store i32 10, i32* @i, align 4
+  store i32 10, ptr @i, align 4
   ret void
 }
 
@@ -21,14 +21,14 @@ entry:
 
 define void @nofoo() #1 {
 entry:
-  store i32 20, i32* @i, align 4
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @y, align 4
+  store i32 20, ptr @i, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @y, align 4
   %add = fadd float %0, %1
-  store float %add, float* @f, align 4
-  %2 = load float, float* @f, align 4
+  store float %add, ptr @f, align 4
+  %2 = load float, ptr @f, align 4
   %conv = fpext float %2 to double
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), double %conv)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, double %conv)
   ret void
 }
 
@@ -43,16 +43,16 @@ entry:
 ; 32:	.set	macro
 ; 32:	.set	reorder
 ; 32:	.end	nofoo
-declare i32 @printf(i8*, ...) #2
+declare i32 @printf(ptr, ...) #2
 
 define i32 @main() #3 {
 entry:
   call void @foo()
-  %0 = load i32, i32* @i, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str1, i32 0, i32 0), i32 %0)
+  %0 = load i32, ptr @i, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str1, i32 %0)
   call void @nofoo()
-  %1 = load i32, i32* @i, align 4
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str2, i32 0, i32 0), i32 %1)
+  %1 = load i32, ptr @i, align 4
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str2, i32 %1)
   ret i32 0
 }
 

diff  --git a/llvm/test/CodeGen/Mips/mips16_fpret.ll b/llvm/test/CodeGen/Mips/mips16_fpret.ll
index 651feba198083..ca9aa84fceea1 100644
--- a/llvm/test/CodeGen/Mips/mips16_fpret.ll
+++ b/llvm/test/CodeGen/Mips/mips16_fpret.ll
@@ -11,7 +11,7 @@
 
 define float @foox()  {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   ret float %0
 ; 1: 	.ent	foox
 ; 1:	lw	$2, %lo(x)(${{[0-9]+}})
@@ -20,7 +20,7 @@ entry:
 
 define double @foodx()  {
 entry:
-  %0 = load double, double* @dx, align 8
+  %0 = load double, ptr @dx, align 8
   ret double %0
 ; 1: 	.ent	foodx
 ; 1: 	lw	$2, %lo(dx)(${{[0-9]+}})
@@ -34,13 +34,13 @@ entry:
 define { float, float } @foocx()  {
 entry:
   %retval = alloca { float, float }, align 4
-  %cx.real = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cx, i32 0, i32 0)
-  %cx.imag = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @cx, i32 0, i32 1)
-  %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
-  %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
-  store float %cx.real, float* %real
-  store float %cx.imag, float* %imag
-  %0 = load { float, float }, { float, float }* %retval
+  %cx.real = load float, ptr getelementptr inbounds ({ float, float }, ptr @cx, i32 0, i32 0)
+  %cx.imag = load float, ptr getelementptr inbounds ({ float, float }, ptr @cx, i32 0, i32 1)
+  %real = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 0
+  %imag = getelementptr inbounds { float, float }, ptr %retval, i32 0, i32 1
+  store float %cx.real, ptr %real
+  store float %cx.imag, ptr %imag
+  %0 = load { float, float }, ptr %retval
   ret { float, float } %0
 ; 1: 	.ent	foocx
 ; 1: 	lw	$2, %lo(cx)(${{[0-9]+}})
@@ -53,13 +53,13 @@ entry:
 define { double, double } @foodcx()  {
 entry:
   %retval = alloca { double, double }, align 8
-  %dcx.real = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @dcx, i32 0, i32 0)
-  %dcx.imag = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @dcx, i32 0, i32 1)
-  %real = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0
-  %imag = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1
-  store double %dcx.real, double* %real
-  store double %dcx.imag, double* %imag
-  %0 = load { double, double }, { double, double }* %retval
+  %dcx.real = load double, ptr getelementptr inbounds ({ double, double }, ptr @dcx, i32 0, i32 0)
+  %dcx.imag = load double, ptr getelementptr inbounds ({ double, double }, ptr @dcx, i32 0, i32 1)
+  %real = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 0
+  %imag = getelementptr inbounds { double, double }, ptr %retval, i32 0, i32 1
+  store double %dcx.real, ptr %real
+  store double %dcx.imag, ptr %imag
+  %0 = load { double, double }, ptr %retval
   ret { double, double } %0
 ; 1: 	.ent	foodcx
 ; 1: 	lw	${{[0-9]}}, %lo(dcx)(${{[0-9]+}})

diff  --git a/llvm/test/CodeGen/Mips/mips16ex.ll b/llvm/test/CodeGen/Mips/mips16ex.ll
index fe16fee94fa78..609f53bae6492 100644
--- a/llvm/test/CodeGen/Mips/mips16ex.ll
+++ b/llvm/test/CodeGen/Mips/mips16ex.ll
@@ -6,46 +6,44 @@
 ;16-NEXT: .cfi_startproc
 ;16-NEXT: .cfi_personality
 @.str = private unnamed_addr constant [7 x i8] c"hello\0A\00", align 1
-@_ZTIi = external constant i8*
+@_ZTIi = external constant ptr
 @.str1 = private unnamed_addr constant [15 x i8] c"exception %i \0A\00", align 1
 
-define i32 @main() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define i32 @main() personality ptr @__gxx_personality_v0 {
 entry:
   %retval = alloca i32, align 4
-  %exn.slot = alloca i8*
+  %exn.slot = alloca ptr
   %ehselector.slot = alloca i32
   %e = alloca i32, align 4
-  store i32 0, i32* %retval
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0))
-  %exception = call i8* @__cxa_allocate_exception(i32 4) nounwind
-  %0 = bitcast i8* %exception to i32*
-  store i32 20, i32* %0
-  invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) noreturn
+  store i32 0, ptr %retval
+  %call = call i32 (ptr, ...) @printf(ptr @.str)
+  %exception = call ptr @__cxa_allocate_exception(i32 4) nounwind
+  store i32 20, ptr %exception
+  invoke void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null) noreturn
           to label %unreachable unwind label %lpad
 
 lpad:                                             ; preds = %entry
-  %1 = landingpad { i8*, i32 }
-          catch i8* bitcast (i8** @_ZTIi to i8*)
-  %2 = extractvalue { i8*, i32 } %1, 0
-  store i8* %2, i8** %exn.slot
-  %3 = extractvalue { i8*, i32 } %1, 1
-  store i32 %3, i32* %ehselector.slot
+  %0 = landingpad { ptr, i32 }
+          catch ptr @_ZTIi
+  %1 = extractvalue { ptr, i32 } %0, 0
+  store ptr %1, ptr %exn.slot
+  %2 = extractvalue { ptr, i32 } %0, 1
+  store i32 %2, ptr %ehselector.slot
   br label %catch.dispatch
 
 catch.dispatch:                                   ; preds = %lpad
-  %sel = load i32, i32* %ehselector.slot
-  %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) nounwind
-  %matches = icmp eq i32 %sel, %4
+  %sel = load i32, ptr %ehselector.slot
+  %3 = call i32 @llvm.eh.typeid.for(ptr @_ZTIi) nounwind
+  %matches = icmp eq i32 %sel, %3
   br i1 %matches, label %catch, label %eh.resume
 
 catch:                                            ; preds = %catch.dispatch
-  %exn = load i8*, i8** %exn.slot
-  %5 = call i8* @__cxa_begin_catch(i8* %exn) nounwind
-  %6 = bitcast i8* %5 to i32*
-  %exn.scalar = load i32, i32* %6
-  store i32 %exn.scalar, i32* %e, align 4
-  %7 = load i32, i32* %e, align 4
-  %call2 = invoke i32 (i8*, ...) @printf(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str1, i32 0, i32 0), i32 %7)
+  %exn = load ptr, ptr %exn.slot
+  %4 = call ptr @__cxa_begin_catch(ptr %exn) nounwind
+  %exn.scalar = load i32, ptr %4
+  store i32 %exn.scalar, ptr %e, align 4
+  %5 = load i32, ptr %e, align 4
+  %call2 = invoke i32 (ptr, ...) @printf(ptr @.str1, i32 %5)
           to label %invoke.cont unwind label %lpad1
 
 invoke.cont:                                      ; preds = %catch
@@ -56,36 +54,36 @@ try.cont:                                         ; preds = %invoke.cont
   ret i32 0
 
 lpad1:                                            ; preds = %catch
-  %8 = landingpad { i8*, i32 }
+  %6 = landingpad { ptr, i32 }
           cleanup
-  %9 = extractvalue { i8*, i32 } %8, 0
-  store i8* %9, i8** %exn.slot
-  %10 = extractvalue { i8*, i32 } %8, 1
-  store i32 %10, i32* %ehselector.slot
+  %7 = extractvalue { ptr, i32 } %6, 0
+  store ptr %7, ptr %exn.slot
+  %8 = extractvalue { ptr, i32 } %6, 1
+  store i32 %8, ptr %ehselector.slot
   call void @__cxa_end_catch() nounwind
   br label %eh.resume
 
 eh.resume:                                        ; preds = %lpad1, %catch.dispatch
-  %exn3 = load i8*, i8** %exn.slot
-  %sel4 = load i32, i32* %ehselector.slot
-  %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn3, 0
-  %lpad.val5 = insertvalue { i8*, i32 } %lpad.val, i32 %sel4, 1
-  resume { i8*, i32 } %lpad.val5
+  %exn3 = load ptr, ptr %exn.slot
+  %sel4 = load i32, ptr %ehselector.slot
+  %lpad.val = insertvalue { ptr, i32 } undef, ptr %exn3, 0
+  %lpad.val5 = insertvalue { ptr, i32 } %lpad.val, i32 %sel4, 1
+  resume { ptr, i32 } %lpad.val5
 
 unreachable:                                      ; preds = %entry
   unreachable
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
 
-declare i8* @__cxa_allocate_exception(i32)
+declare ptr @__cxa_allocate_exception(i32)
 
 declare i32 @__gxx_personality_v0(...)
 
-declare void @__cxa_throw(i8*, i8*, i8*)
+declare void @__cxa_throw(ptr, ptr, ptr)
 
-declare i32 @llvm.eh.typeid.for(i8*) nounwind readnone
+declare i32 @llvm.eh.typeid.for(ptr) nounwind readnone
 
-declare i8* @__cxa_begin_catch(i8*)
+declare ptr @__cxa_begin_catch(ptr)
 
 declare void @__cxa_end_catch()

diff  --git a/llvm/test/CodeGen/Mips/mips16fpe.ll b/llvm/test/CodeGen/Mips/mips16fpe.ll
index b8f1d945f3564..310213b43b002 100644
--- a/llvm/test/CodeGen/Mips/mips16fpe.ll
+++ b/llvm/test/CodeGen/Mips/mips16fpe.ll
@@ -43,10 +43,10 @@
 define void @test_addsf3() nounwind {
 entry:
 ;16hf-LABEL: test_addsf3:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @y, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @y, align 4
   %add = fadd float %0, %1
-  store float %add, float* @addsf3_result, align 4
+  store float %add, ptr @addsf3_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_addsf3)(${{[0-9]+}})
   ret void
 }
@@ -54,10 +54,10 @@ entry:
 define void @test_adddf3() nounwind {
 entry:
 ;16hf-LABEL: test_adddf3:
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @yd, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @yd, align 8
   %add = fadd double %0, %1
-  store double %add, double* @adddf3_result, align 8
+  store double %add, ptr @adddf3_result, align 8
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_adddf3)(${{[0-9]+}})
   ret void
 }
@@ -65,10 +65,10 @@ entry:
 define void @test_subsf3() nounwind {
 entry:
 ;16hf-LABEL: test_subsf3:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @y, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @y, align 4
   %sub = fsub float %0, %1
-  store float %sub, float* @subsf3_result, align 4
+  store float %sub, ptr @subsf3_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_subsf3)(${{[0-9]+}})
   ret void
 }
@@ -76,10 +76,10 @@ entry:
 define void @test_subdf3() nounwind {
 entry:
 ;16hf-LABEL: test_subdf3:
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @yd, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @yd, align 8
   %sub = fsub double %0, %1
-  store double %sub, double* @subdf3_result, align 8
+  store double %sub, ptr @subdf3_result, align 8
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_subdf3)(${{[0-9]+}})
   ret void
 }
@@ -87,10 +87,10 @@ entry:
 define void @test_mulsf3() nounwind {
 entry:
 ;16hf-LABEL: test_mulsf3:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @y, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @y, align 4
   %mul = fmul float %0, %1
-  store float %mul, float* @mulsf3_result, align 4
+  store float %mul, ptr @mulsf3_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_mulsf3)(${{[0-9]+}})
   ret void
 }
@@ -98,10 +98,10 @@ entry:
 define void @test_muldf3() nounwind {
 entry:
 ;16hf-LABEL: test_muldf3:
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @yd, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @yd, align 8
   %mul = fmul double %0, %1
-  store double %mul, double* @muldf3_result, align 8
+  store double %mul, ptr @muldf3_result, align 8
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_muldf3)(${{[0-9]+}})
   ret void
 }
@@ -109,10 +109,10 @@ entry:
 define void @test_divsf3() nounwind {
 entry:
 ;16hf-LABEL: test_divsf3:
-  %0 = load float, float* @y, align 4
-  %1 = load float, float* @x, align 4
+  %0 = load float, ptr @y, align 4
+  %1 = load float, ptr @x, align 4
   %div = fdiv float %0, %1
-  store float %div, float* @divsf3_result, align 4
+  store float %div, ptr @divsf3_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_divsf3)(${{[0-9]+}})
   ret void
 }
@@ -120,11 +120,11 @@ entry:
 define void @test_divdf3() nounwind {
 entry:
 ;16hf-LABEL: test_divdf3:
-  %0 = load double, double* @yd, align 8
+  %0 = load double, ptr @yd, align 8
   %mul = fmul double %0, 2.000000e+00
-  %1 = load double, double* @xd, align 8
+  %1 = load double, ptr @xd, align 8
   %div = fdiv double %mul, %1
-  store double %div, double* @divdf3_result, align 8
+  store double %div, ptr @divdf3_result, align 8
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_divdf3)(${{[0-9]+}})
   ret void
 }
@@ -132,9 +132,9 @@ entry:
 define void @test_extendsfdf2() nounwind {
 entry:
 ;16hf-LABEL: test_extendsfdf2:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %conv = fpext float %0 to double
-  store double %conv, double* @extendsfdf2_result, align 8
+  store double %conv, ptr @extendsfdf2_result, align 8
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_extendsfdf2)(${{[0-9]+}})
   ret void
 }
@@ -142,9 +142,9 @@ entry:
 define void @test_truncdfsf2() nounwind {
 entry:
 ;16hf-LABEL: test_truncdfsf2:
-  %0 = load double, double* @xd2, align 8
+  %0 = load double, ptr @xd2, align 8
   %conv = fptrunc double %0 to float
-  store float %conv, float* @truncdfsf2_result, align 4
+  store float %conv, ptr @truncdfsf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_truncdfsf2)(${{[0-9]+}})
   ret void
 }
@@ -152,9 +152,9 @@ entry:
 define void @test_fix_truncsfsi() nounwind {
 entry:
 ;16hf-LABEL: test_fix_truncsfsi:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %conv = fptosi float %0 to i32
-  store i32 %conv, i32* @fix_truncsfsi_result, align 4
+  store i32 %conv, ptr @fix_truncsfsi_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_fix_truncsfsi)(${{[0-9]+}})
   ret void
 }
@@ -162,9 +162,9 @@ entry:
 define void @test_fix_truncdfsi() nounwind {
 entry:
 ;16hf-LABEL: test_fix_truncdfsi:
-  %0 = load double, double* @xd, align 8
+  %0 = load double, ptr @xd, align 8
   %conv = fptosi double %0 to i32
-  store i32 %conv, i32* @fix_truncdfsi_result, align 4
+  store i32 %conv, ptr @fix_truncdfsi_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_fix_truncdfsi)(${{[0-9]+}})
   ret void
 }
@@ -172,9 +172,9 @@ entry:
 define void @test_floatsisf() nounwind {
 entry:
 ;16hf-LABEL: test_floatsisf:
-  %0 = load i32, i32* @si, align 4
+  %0 = load i32, ptr @si, align 4
   %conv = sitofp i32 %0 to float
-  store float %conv, float* @floatsisf_result, align 4
+  store float %conv, ptr @floatsisf_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_floatsisf)(${{[0-9]+}})
   ret void
 }
@@ -182,9 +182,9 @@ entry:
 define void @test_floatsidf() nounwind {
 entry:
 ;16hf-LABEL: test_floatsidf:
-  %0 = load i32, i32* @si, align 4
+  %0 = load i32, ptr @si, align 4
   %conv = sitofp i32 %0 to double
-  store double %conv, double* @floatsidf_result, align 8
+  store double %conv, ptr @floatsidf_result, align 8
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_floatsidf)(${{[0-9]+}})
   ret void
 }
@@ -192,9 +192,9 @@ entry:
 define void @test_floatunsisf() nounwind {
 entry:
 ;16hf-LABEL: test_floatunsisf:
-  %0 = load i32, i32* @ui, align 4
+  %0 = load i32, ptr @ui, align 4
   %conv = uitofp i32 %0 to float
-  store float %conv, float* @floatunsisf_result, align 4
+  store float %conv, ptr @floatunsisf_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_floatunsisf)(${{[0-9]+}})
   ret void
 }
@@ -202,9 +202,9 @@ entry:
 define void @test_floatunsidf() nounwind {
 entry:
 ;16hf-LABEL: test_floatunsidf:
-  %0 = load i32, i32* @ui, align 4
+  %0 = load i32, ptr @ui, align 4
   %conv = uitofp i32 %0 to double
-  store double %conv, double* @floatunsidf_result, align 8
+  store double %conv, ptr @floatunsidf_result, align 8
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_floatunsidf)(${{[0-9]+}})
   ret void
 }
@@ -212,11 +212,11 @@ entry:
 define void @test_eqsf2() nounwind {
 entry:
 ;16hf-LABEL: test_eqsf2:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @xx, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @xx, align 4
   %cmp = fcmp oeq float %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @eqsf2_result, align 4
+  store i32 %conv, ptr @eqsf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_eqsf2)(${{[0-9]+}})
   ret void
 }
@@ -224,11 +224,11 @@ entry:
 define void @test_eqdf2() nounwind {
 entry:
 ;16hf-LABEL: test_eqdf2:
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @xxd, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @xxd, align 8
   %cmp = fcmp oeq double %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @eqdf2_result, align 4
+  store i32 %conv, ptr @eqdf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_eqdf2)(${{[0-9]+}})
   ret void
 }
@@ -236,11 +236,11 @@ entry:
 define void @test_nesf2() nounwind {
 entry:
 ;16hf-LABEL: test_nesf2:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @y, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @y, align 4
   %cmp = fcmp une float %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @nesf2_result, align 4
+  store i32 %conv, ptr @nesf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_nesf2)(${{[0-9]+}})
   ret void
 }
@@ -248,11 +248,11 @@ entry:
 define void @test_nedf2() nounwind {
 entry:
 ;16hf-LABEL: test_nedf2:
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @yd, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @yd, align 8
   %cmp = fcmp une double %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @nedf2_result, align 4
+  store i32 %conv, ptr @nedf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_nedf2)(${{[0-9]+}})
   ret void
 }
@@ -260,14 +260,14 @@ entry:
 define void @test_gesf2() nounwind {
 entry:
 ;16hf-LABEL: test_gesf2:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @xx, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @xx, align 4
   %cmp = fcmp oge float %0, %1
-  %2 = load float, float* @y, align 4
+  %2 = load float, ptr @y, align 4
   %cmp1 = fcmp oge float %2, %0
   %and3 = and i1 %cmp, %cmp1
   %and = zext i1 %and3 to i32
-  store i32 %and, i32* @gesf2_result, align 4
+  store i32 %and, ptr @gesf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_gesf2)(${{[0-9]+}})
   ret void
 }
@@ -275,14 +275,14 @@ entry:
 define void @test_gedf2() nounwind {
 entry:
 ;16hf-LABEL: test_gedf2:
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @xxd, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @xxd, align 8
   %cmp = fcmp oge double %0, %1
-  %2 = load double, double* @yd, align 8
+  %2 = load double, ptr @yd, align 8
   %cmp1 = fcmp oge double %2, %0
   %and3 = and i1 %cmp, %cmp1
   %and = zext i1 %and3 to i32
-  store i32 %and, i32* @gedf2_result, align 4
+  store i32 %and, ptr @gedf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_gedf2)(${{[0-9]+}})
   ret void
 }
@@ -290,14 +290,14 @@ entry:
 define void @test_ltsf2() nounwind {
 entry:
 ;16hf-LABEL: test_ltsf2:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @xx, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @xx, align 4
   %lnot = fcmp uge float %0, %1
-  %2 = load float, float* @y, align 4
+  %2 = load float, ptr @y, align 4
   %cmp1 = fcmp olt float %0, %2
   %and2 = and i1 %lnot, %cmp1
   %and = zext i1 %and2 to i32
-  store i32 %and, i32* @ltsf2_result, align 4
+  store i32 %and, ptr @ltsf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_ltsf2)(${{[0-9]+}})
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_ltsf2)(${{[0-9]+}})
   ret void
@@ -306,14 +306,14 @@ entry:
 define void @test_ltdf2() nounwind {
 entry:
 ;16hf-LABEL: test_ltdf2:
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @xxd, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @xxd, align 8
   %lnot = fcmp uge double %0, %1
-  %2 = load double, double* @yd, align 8
+  %2 = load double, ptr @yd, align 8
   %cmp1 = fcmp olt double %0, %2
   %and2 = and i1 %lnot, %cmp1
   %and = zext i1 %and2 to i32
-  store i32 %and, i32* @ltdf2_result, align 4
+  store i32 %and, ptr @ltdf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_ltdf2)(${{[0-9]+}})
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_ltdf2)(${{[0-9]+}})
   ret void
@@ -322,14 +322,14 @@ entry:
 define void @test_lesf2() nounwind {
 entry:
 ;16hf-LABEL: test_lesf2:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @xx, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @xx, align 4
   %cmp = fcmp ole float %0, %1
-  %2 = load float, float* @y, align 4
+  %2 = load float, ptr @y, align 4
   %cmp1 = fcmp ole float %0, %2
   %and3 = and i1 %cmp, %cmp1
   %and = zext i1 %and3 to i32
-  store i32 %and, i32* @lesf2_result, align 4
+  store i32 %and, ptr @lesf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_lesf2)(${{[0-9]+}})
   ret void
 }
@@ -337,14 +337,14 @@ entry:
 define void @test_ledf2() nounwind {
 entry:
 ;16hf-LABEL: test_ledf2:
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @xxd, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @xxd, align 8
   %cmp = fcmp ole double %0, %1
-  %2 = load double, double* @yd, align 8
+  %2 = load double, ptr @yd, align 8
   %cmp1 = fcmp ole double %0, %2
   %and3 = and i1 %cmp, %cmp1
   %and = zext i1 %and3 to i32
-  store i32 %and, i32* @ledf2_result, align 4
+  store i32 %and, ptr @ledf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_ledf2)(${{[0-9]+}})
   ret void
 }
@@ -352,14 +352,14 @@ entry:
 define void @test_gtsf2() nounwind {
 entry:
 ;16hf-LABEL: test_gtsf2:
-  %0 = load float, float* @x, align 4
-  %1 = load float, float* @xx, align 4
+  %0 = load float, ptr @x, align 4
+  %1 = load float, ptr @xx, align 4
   %lnot = fcmp ule float %0, %1
-  %2 = load float, float* @y, align 4
+  %2 = load float, ptr @y, align 4
   %cmp1 = fcmp ogt float %2, %0
   %and2 = and i1 %lnot, %cmp1
   %and = zext i1 %and2 to i32
-  store i32 %and, i32* @gtsf2_result, align 4
+  store i32 %and, ptr @gtsf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_gtsf2)(${{[0-9]+}})
   ret void
 }
@@ -367,14 +367,14 @@ entry:
 define void @test_gtdf2() nounwind {
 entry:
 ;16hf-LABEL: test_gtdf2:
-  %0 = load double, double* @xd, align 8
-  %1 = load double, double* @xxd, align 8
+  %0 = load double, ptr @xd, align 8
+  %1 = load double, ptr @xxd, align 8
   %lnot = fcmp ule double %0, %1
-  %2 = load double, double* @yd, align 8
+  %2 = load double, ptr @yd, align 8
   %cmp1 = fcmp ogt double %2, %0
   %and2 = and i1 %lnot, %cmp1
   %and = zext i1 %and2 to i32
-  store i32 %and, i32* @gtdf2_result, align 4
+  store i32 %and, ptr @gtdf2_result, align 4
 ;16hf:  lw	${{[0-9]+}}, %call16(__mips16_gtdf2)(${{[0-9]+}})
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/mips3-spill-slot.ll b/llvm/test/CodeGen/Mips/mips3-spill-slot.ll
index 182473ad4537c..4eabbeb867e38 100644
--- a/llvm/test/CodeGen/Mips/mips3-spill-slot.ll
+++ b/llvm/test/CodeGen/Mips/mips3-spill-slot.ll
@@ -6,49 +6,49 @@
 
 ; CHECK-NOT: Cannot scavenge register without an emergency spill slot!
 
-@n = external local_unnamed_addr global i32*, align 8
+@n = external local_unnamed_addr global ptr, align 8
 
-define void @o(i32* nocapture readonly %a, i64* %b) local_unnamed_addr {
+define void @o(ptr nocapture readonly %a, ptr %b) local_unnamed_addr {
 entry:
-  %0 = load i32, i32* undef, align 4
+  %0 = load i32, ptr undef, align 4
   %and12 = and i32 %0, 67295
   %1 = zext i32 %and12 to i64
   %conv16 = sext i32 %0 to i64
-  %2 = ptrtoint i64* %b to i64
+  %2 = ptrtoint ptr %b to i64
   %mul22 = mul nsw i64 %1, %2
   %mul23 = mul nsw i64 %conv16, %2
   %tobool25 = icmp ne i64 %mul22, 0
   %inc27 = zext i1 %tobool25 to i64
-  %3 = load i32*, i32** @n, align 8
-  %arrayidx36 = getelementptr inbounds i32, i32* %3, i64 4
-  store i32 0, i32* %arrayidx36, align 4
+  %3 = load ptr, ptr @n, align 8
+  %arrayidx36 = getelementptr inbounds i32, ptr %3, i64 4
+  store i32 0, ptr %arrayidx36, align 4
   %spec.select = add i64 0, %mul23
   %hi14.0 = add i64 %spec.select, %inc27
   %add51 = add i64 %hi14.0, 0
-  %4 = load i32, i32* null, align 4
+  %4 = load i32, ptr null, align 4
   %and59 = and i32 %4, 67295
   %5 = zext i32 %and59 to i64
   %conv63 = sext i32 %4 to i64
-  %6 = load i64, i64* %b, align 8
+  %6 = load i64, ptr %b, align 8
   %mul71 = mul nsw i64 %6, %5
   %mul72 = mul nsw i64 %6, %conv63
   %tobool74 = icmp ne i64 %mul71, 0
   %inc76 = zext i1 %tobool74 to i64
-  %arrayidx85 = getelementptr inbounds i32, i32* %a, i64 5
-  %7 = load i32, i32* %arrayidx85, align 4
+  %arrayidx85 = getelementptr inbounds i32, ptr %a, i64 5
+  %7 = load i32, ptr %arrayidx85, align 4
   %and86 = and i32 %7, 67295
   %conv90 = sext i32 %7 to i64
-  %8 = load i64, i64* undef, align 8
+  %8 = load i64, ptr undef, align 8
   %mul99 = mul nsw i64 %8, %conv90
-  %9 = load i32, i32* undef, align 4
+  %9 = load i32, ptr undef, align 4
   %and113 = and i32 %9, 67295
   %tobool126 = icmp eq i32 %and113, 0
   %spec.select397.v = select i1 %tobool126, i64 2, i64 3
-  %10 = load i32, i32* undef, align 4
+  %10 = load i32, ptr undef, align 4
   %and138 = and i32 %10, 67295
   %11 = zext i32 %and138 to i64
   %conv142 = sext i32 %10 to i64
-  %12 = load i64, i64* null, align 8
+  %12 = load i64, ptr null, align 8
   %mul150 = mul nsw i64 %12, %11
   %mul151 = mul nsw i64 %12, %conv142
   %tobool153 = icmp ne i64 %mul150, 0
@@ -68,7 +68,7 @@ entry:
   %add110 = add i64 %add83, 0
   %add135 = add i64 %add110, 0
   %add162 = add i64 %add135, 0
-  %13 = load i32, i32* null, align 4
+  %13 = load i32, ptr null, align 4
   %and165 = and i32 %13, 67295
   %14 = zext i32 %and165 to i64
   %conv169 = sext i32 %13 to i64
@@ -90,7 +90,7 @@ entry:
   %inc210 = zext i1 %tobool208 to i64
   %hi192.0 = add i64 %spec.select400, %add157
   %add212 = add i64 %hi192.0, %inc210
-  %15 = inttoptr i64 %add212 to i32*
-  store i32* %15, i32** @n, align 8
+  %15 = inttoptr i64 %add212 to ptr
+  store ptr %15, ptr @n, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/mips64-f128-call.ll b/llvm/test/CodeGen/Mips/mips64-f128-call.ll
index 19fa8fc752450..879dc902eeb33 100644
--- a/llvm/test/CodeGen/Mips/mips64-f128-call.ll
+++ b/llvm/test/CodeGen/Mips/mips64-f128-call.ll
@@ -9,7 +9,7 @@
 
 define void @foo0(fp128 %a0) {
 entry:
-  store fp128 %a0, fp128* @gld0, align 16
+  store fp128 %a0, ptr @gld0, align 16
   ret void
 }
 
@@ -19,7 +19,7 @@ entry:
 
 define void @foo1() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   tail call void @foo2(fp128 %0)
   ret void
 }
@@ -42,8 +42,8 @@ declare void @foo2(fp128)
 define fp128 @foo3() {
 entry:
   %call = tail call fp128 @foo4()
-  store fp128 %call, fp128* @gld0, align 16
-  %0 = load fp128, fp128* @gld1, align 16
+  store fp128 %call, ptr @gld0, align 16
+  %0 = load fp128, ptr @gld1, align 16
   ret fp128 %0
 }
 

diff  --git a/llvm/test/CodeGen/Mips/mips64-f128.ll b/llvm/test/CodeGen/Mips/mips64-f128.ll
index ad2da0d3d5a69..cbb708d8d5186 100644
--- a/llvm/test/CodeGen/Mips/mips64-f128.ll
+++ b/llvm/test/CodeGen/Mips/mips64-f128.ll
@@ -22,8 +22,8 @@
 
 define fp128 @addLD() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
-  %1 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld0, align 16
+  %1 = load fp128, ptr @gld1, align 16
   %add = fadd fp128 %0, %1
   ret fp128 %add
 }
@@ -33,8 +33,8 @@ entry:
 
 define fp128 @subLD() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
-  %1 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld0, align 16
+  %1 = load fp128, ptr @gld1, align 16
   %sub = fsub fp128 %0, %1
   ret fp128 %sub
 }
@@ -44,8 +44,8 @@ entry:
 
 define fp128 @mulLD() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
-  %1 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld0, align 16
+  %1 = load fp128, ptr @gld1, align 16
   %mul = fmul fp128 %0, %1
   ret fp128 %mul
 }
@@ -55,8 +55,8 @@ entry:
 
 define fp128 @divLD() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
-  %1 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld0, align 16
+  %1 = load fp128, ptr @gld1, align 16
   %div = fdiv fp128 %0, %1
   ret fp128 %div
 }
@@ -255,7 +255,7 @@ entry:
 
 define fp128 @libcall1_fabsl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @fabsl(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -267,7 +267,7 @@ declare fp128 @fabsl(fp128) #1
 
 define fp128 @libcall1_ceill() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @ceill(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -279,7 +279,7 @@ declare fp128 @ceill(fp128) #1
 
 define fp128 @libcall1_sinl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @sinl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -291,7 +291,7 @@ declare fp128 @sinl(fp128) #2
 
 define fp128 @libcall1_cosl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @cosl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -303,7 +303,7 @@ declare fp128 @cosl(fp128) #2
 
 define fp128 @libcall1_expl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @expl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -315,7 +315,7 @@ declare fp128 @expl(fp128) #2
 
 define fp128 @libcall1_exp2l() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @exp2l(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -327,7 +327,7 @@ declare fp128 @exp2l(fp128) #2
 
 define fp128 @libcall1_logl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @logl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -339,7 +339,7 @@ declare fp128 @logl(fp128) #2
 
 define fp128 @libcall1_log2l() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @log2l(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -351,7 +351,7 @@ declare fp128 @log2l(fp128) #2
 
 define fp128 @libcall1_log10l() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @log10l(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -363,7 +363,7 @@ declare fp128 @log10l(fp128) #2
 
 define fp128 @libcall1_nearbyintl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @nearbyintl(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -375,7 +375,7 @@ declare fp128 @nearbyintl(fp128) #1
 
 define fp128 @libcall1_floorl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @floorl(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -387,7 +387,7 @@ declare fp128 @floorl(fp128) #1
 
 define fp128 @libcall1_sqrtl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @sqrtl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -399,7 +399,7 @@ declare fp128 @sqrtl(fp128) #2
 
 define fp128 @libcall1_rintl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld0, align 16
   %call = tail call fp128 @rintl(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -433,8 +433,8 @@ declare fp128 @llvm.powi.f128.i32(fp128, i32) #3
 
 define fp128 @libcall2_copysignl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
-  %1 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld0, align 16
+  %1 = load fp128, ptr @gld1, align 16
   %call = tail call fp128 @copysignl(fp128 %0, fp128 %1) nounwind readnone
   ret fp128 %call
 }
@@ -446,8 +446,8 @@ declare fp128 @copysignl(fp128, fp128) #1
 
 define fp128 @libcall2_powl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
-  %1 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld0, align 16
+  %1 = load fp128, ptr @gld1, align 16
   %call = tail call fp128 @powl(fp128 %0, fp128 %1) nounwind
   ret fp128 %call
 }
@@ -459,8 +459,8 @@ declare fp128 @powl(fp128, fp128) #2
 
 define fp128 @libcall2_fmodl() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
-  %1 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld0, align 16
+  %1 = load fp128, ptr @gld1, align 16
   %call = tail call fp128 @fmodl(fp128 %0, fp128 %1) nounwind
   ret fp128 %call
 }
@@ -472,9 +472,9 @@ declare fp128 @fmodl(fp128, fp128) #2
 
 define fp128 @libcall3_fmal() {
 entry:
-  %0 = load fp128, fp128* @gld0, align 16
-  %1 = load fp128, fp128* @gld2, align 16
-  %2 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld0, align 16
+  %1 = load fp128, ptr @gld2, align 16
+  %2 = load fp128, ptr @gld1, align 16
   %3 = tail call fp128 @llvm.fma.f128(fp128 %0, fp128 %2, fp128 %1)
   ret fp128 %3
 }
@@ -548,7 +548,7 @@ entry:
 
 define fp128 @load_LD_LD() {
 entry:
-  %0 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld1, align 16
   ret fp128 %0
 }
 
@@ -561,7 +561,7 @@ entry:
 
 define fp128 @load_LD_float() {
 entry:
-  %0 = load float, float* @gf1, align 4
+  %0 = load float, ptr @gf1, align 4
   %conv = fpext float %0 to fp128
   ret fp128 %conv
 }
@@ -575,7 +575,7 @@ entry:
 
 define fp128 @load_LD_double() {
 entry:
-  %0 = load double, double* @gd1, align 8
+  %0 = load double, ptr @gd1, align 8
   %conv = fpext double %0 to fp128
   ret fp128 %conv
 }
@@ -590,8 +590,8 @@ entry:
 
 define void @store_LD_LD() {
 entry:
-  %0 = load fp128, fp128* @gld1, align 16
-  store fp128 %0, fp128* @gld0, align 16
+  %0 = load fp128, ptr @gld1, align 16
+  store fp128 %0, ptr @gld0, align 16
   ret void
 }
 
@@ -607,9 +607,9 @@ entry:
 
 define void @store_LD_float() {
 entry:
-  %0 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld1, align 16
   %conv = fptrunc fp128 %0 to float
-  store float %conv, float* @gf1, align 4
+  store float %conv, ptr @gf1, align 4
   ret void
 }
 
@@ -625,9 +625,9 @@ entry:
 
 define void @store_LD_double() {
 entry:
-  %0 = load fp128, fp128* @gld1, align 16
+  %0 = load fp128, ptr @gld1, align 16
   %conv = fptrunc fp128 %0 to double
-  store double %conv, double* @gd1, align 8
+  store double %conv, ptr @gd1, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/mips64-sret.ll b/llvm/test/CodeGen/Mips/mips64-sret.ll
index 8d3d6cbbb58dd..9f4bc9ddfcc54 100644
--- a/llvm/test/CodeGen/Mips/mips64-sret.ll
+++ b/llvm/test/CodeGen/Mips/mips64-sret.ll
@@ -1,23 +1,23 @@
 ; RUN: llc -march=mips64el -mcpu=mips64r2 -target-abi=n64 < %s | FileCheck %s
 
-define void @foo(i32* noalias sret(i32) %agg.result) nounwind {
+define void @foo(ptr noalias sret(i32) %agg.result) nounwind {
 entry:
 ; CHECK-LABEL: foo:
 ; CHECK: sw {{.*}}, 0($4)
 ; CHECK: jr $ra
 ; CHECK-NEXT: move $2, $4
 
-  store i32 42, i32* %agg.result
+  store i32 42, ptr %agg.result
   ret void
 }
 
-define void @bar(i32 signext %v, i32* noalias sret(i32) %agg.result) nounwind {
+define void @bar(i32 signext %v, ptr noalias sret(i32) %agg.result) nounwind {
 entry:
 ; CHECK-LABEL: bar:
 ; CHECK: sw $4, 0($5)
 ; CHECK: jr $ra
 ; CHECK-NEXT: move $2, $5
 
-  store i32 %v, i32* %agg.result
+  store i32 %v, ptr %agg.result
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/mips64directive.ll b/llvm/test/CodeGen/Mips/mips64directive.ll
index b1052f77f5a87..6d6674496f7d0 100644
--- a/llvm/test/CodeGen/Mips/mips64directive.ll
+++ b/llvm/test/CodeGen/Mips/mips64directive.ll
@@ -6,7 +6,7 @@
 ; CHECK: 8byte
 define i64 @foo1() nounwind readonly {
 entry:
-  %0 = load i64, i64* @gl, align 8
+  %0 = load i64, ptr @gl, align 8
   ret i64 %0
 }
 

diff  --git a/llvm/test/CodeGen/Mips/mips64fpldst.ll b/llvm/test/CodeGen/Mips/mips64fpldst.ll
index c439b4ba62334..7811ed85f3adb 100644
--- a/llvm/test/CodeGen/Mips/mips64fpldst.ll
+++ b/llvm/test/CodeGen/Mips/mips64fpldst.ll
@@ -16,7 +16,7 @@ entry:
 ; CHECK-N32: funcfl1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(f0)
 ; CHECK-N32: lwc1 $f{{[0-9]+}}, 0($[[R0]])
-  %0 = load float, float* @f0, align 4
+  %0 = load float, ptr @f0, align 4
   ret float %0
 }
 
@@ -28,7 +28,7 @@ entry:
 ; CHECK-N32: funcfl2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(d0)
 ; CHECK-N32: ldc1 $f{{[0-9]+}}, 0($[[R0]])
-  %0 = load double, double* @d0, align 8
+  %0 = load double, ptr @d0, align 8
   ret double %0
 }
 
@@ -40,8 +40,8 @@ entry:
 ; CHECK-N32: funcfs1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(f0)
 ; CHECK-N32: swc1 $f{{[0-9]+}}, 0($[[R0]])
-  %0 = load float, float* @f1, align 4
-  store float %0, float* @f0, align 4
+  %0 = load float, ptr @f1, align 4
+  store float %0, ptr @f0, align 4
   ret void
 }
 
@@ -53,8 +53,8 @@ entry:
 ; CHECK-N32: funcfs2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(d0)
 ; CHECK-N32: sdc1 $f{{[0-9]+}}, 0($[[R0]])
-  %0 = load double, double* @d1, align 8
-  store double %0, double* @d0, align 8
+  %0 = load double, ptr @d1, align 8
+  store double %0, ptr @d0, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/mips64instrs.ll b/llvm/test/CodeGen/Mips/mips64instrs.ll
index c08c1b73d7404..e8b630b004cb0 100644
--- a/llvm/test/CodeGen/Mips/mips64instrs.ll
+++ b/llvm/test/CodeGen/Mips/mips64instrs.ll
@@ -121,8 +121,8 @@ entry:
 ; GPRMULDIV:     ddiv $2, $[[T0]], $[[T1]]
 ; GPRMULDIV:     teq $[[T1]], $zero, 7
 
-  %0 = load i64, i64* @gll0, align 8
-  %1 = load i64, i64* @gll1, align 8
+  %0 = load i64, ptr @gll0, align 8
+  %1 = load i64, ptr @gll1, align 8
   %div = sdiv i64 %0, %1
   ret i64 %div
 }
@@ -140,8 +140,8 @@ entry:
 ; GPRMULDIV:     ddivu $2, $[[T0]], $[[T1]]
 ; GPRMULDIV:     teq $[[T1]], $zero, 7
 
-  %0 = load i64, i64* @gll0, align 8
-  %1 = load i64, i64* @gll1, align 8
+  %0 = load i64, ptr @gll0, align 8
+  %1 = load i64, ptr @gll1, align 8
   %div = udiv i64 %0, %1
   ret i64 %div
 }

diff  --git a/llvm/test/CodeGen/Mips/mips64intldst.ll b/llvm/test/CodeGen/Mips/mips64intldst.ll
index 0abe192de117b..89e13247da014 100644
--- a/llvm/test/CodeGen/Mips/mips64intldst.ll
+++ b/llvm/test/CodeGen/Mips/mips64intldst.ll
@@ -20,7 +20,7 @@ entry:
 ; CHECK-N32: func1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(c)
 ; CHECK-N32: lb ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i8, i8* @c, align 4
+  %0 = load i8, ptr @c, align 4
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
@@ -33,7 +33,7 @@ entry:
 ; CHECK-N32: func2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(s)
 ; CHECK-N32: lh ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i16, i16* @s, align 4
+  %0 = load i16, ptr @s, align 4
   %conv = sext i16 %0 to i64
   ret i64 %conv
 }
@@ -46,7 +46,7 @@ entry:
 ; CHECK-N32: func3
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(i)
 ; CHECK-N32: lw ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }
@@ -59,7 +59,7 @@ entry:
 ; CHECK-N32: func4
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(l)
 ; CHECK-N32: ld ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64, i64* @l, align 8
+  %0 = load i64, ptr @l, align 8
   ret i64 %0
 }
 
@@ -71,7 +71,7 @@ entry:
 ; CHECK-N32: ufunc1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(uc)
 ; CHECK-N32: lbu ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i8, i8* @uc, align 4
+  %0 = load i8, ptr @uc, align 4
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
@@ -84,7 +84,7 @@ entry:
 ; CHECK-N32: ufunc2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(us)
 ; CHECK-N32: lhu ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i16, i16* @us, align 4
+  %0 = load i16, ptr @us, align 4
   %conv = zext i16 %0 to i64
   ret i64 %conv
 }
@@ -97,7 +97,7 @@ entry:
 ; CHECK-N32: ufunc3
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(ui)
 ; CHECK-N32: lwu ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i32, i32* @ui, align 4
+  %0 = load i32, ptr @ui, align 4
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
@@ -110,9 +110,9 @@ entry:
 ; CHECK-N32: sfunc1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(c)
 ; CHECK-N32: sb ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64, i64* @l1, align 8
+  %0 = load i64, ptr @l1, align 8
   %conv = trunc i64 %0 to i8
-  store i8 %conv, i8* @c, align 4
+  store i8 %conv, ptr @c, align 4
   ret void
 }
 
@@ -124,9 +124,9 @@ entry:
 ; CHECK-N32: sfunc2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(s)
 ; CHECK-N32: sh ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64, i64* @l1, align 8
+  %0 = load i64, ptr @l1, align 8
   %conv = trunc i64 %0 to i16
-  store i16 %conv, i16* @s, align 4
+  store i16 %conv, ptr @s, align 4
   ret void
 }
 
@@ -138,9 +138,9 @@ entry:
 ; CHECK-N32: sfunc3
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(i)
 ; CHECK-N32: sw ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64, i64* @l1, align 8
+  %0 = load i64, ptr @l1, align 8
   %conv = trunc i64 %0 to i32
-  store i32 %conv, i32* @i, align 4
+  store i32 %conv, ptr @i, align 4
   ret void
 }
 
@@ -152,8 +152,8 @@ entry:
 ; CHECK-N32: sfunc4
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(l)
 ; CHECK-N32: sd ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64, i64* @l1, align 8
-  store i64 %0, i64* @l, align 8
+  %0 = load i64, ptr @l1, align 8
+  store i64 %0, ptr @l, align 8
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/mips64lea.ll b/llvm/test/CodeGen/Mips/mips64lea.ll
index e866b217a59ce..69c0796035166 100644
--- a/llvm/test/CodeGen/Mips/mips64lea.ll
+++ b/llvm/test/CodeGen/Mips/mips64lea.ll
@@ -5,9 +5,9 @@ define void @foo3() nounwind {
 entry:
 ; CHECK: daddiu ${{[0-9]+}}, $sp
   %a = alloca i32, align 4
-  call void @foo1(i32* %a) nounwind
+  call void @foo1(ptr %a) nounwind
   ret void
 }
 
-declare void @foo1(i32*)
+declare void @foo1(ptr)
 

diff  --git a/llvm/test/CodeGen/Mips/mips64signextendsesf.ll b/llvm/test/CodeGen/Mips/mips64signextendsesf.ll
index 2ee1e09f50029..831469ab06247 100644
--- a/llvm/test/CodeGen/Mips/mips64signextendsesf.ll
+++ b/llvm/test/CodeGen/Mips/mips64signextendsesf.ll
@@ -4,10 +4,10 @@ define void @foosf() #0 {
 entry:
   %in = alloca float, align 4
   %out = alloca float, align 4
-  store volatile float 0xBFD59E1380000000, float* %in, align 4
-  %in.0.in.0. = load volatile float, float* %in, align 4
+  store volatile float 0xBFD59E1380000000, ptr %in, align 4
+  %in.0.in.0. = load volatile float, ptr %in, align 4
   %rintf = tail call float @rintf(float %in.0.in.0.) #1
-  store volatile float %rintf, float* %out, align 4
+  store volatile float %rintf, ptr %out, align 4
   ret void
 
 ; CHECK-LABEL:      foosf
@@ -18,9 +18,9 @@ entry:
 
 declare float @rintf(float)
 
-define float @foosf1(float* nocapture readonly %a) #0 {
+define float @foosf1(ptr nocapture readonly %a) #0 {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %call = tail call float @roundf(float %0) #2
   ret float %call
 
@@ -32,9 +32,9 @@ entry:
 
 declare float @roundf(float) #1
 
-define float @foosf2(float* nocapture readonly %a) #0 {
+define float @foosf2(ptr nocapture readonly %a) #0 {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %call = tail call float @truncf(float %0) #2
   ret float %call
 
@@ -46,9 +46,9 @@ entry:
 
 declare float @truncf(float) #1
 
-define float @foosf3(float* nocapture readonly %a) #0 {
+define float @foosf3(ptr nocapture readonly %a) #0 {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %call = tail call float @floorf(float %0) #2
   ret float %call
 
@@ -60,9 +60,9 @@ entry:
 
 declare float @floorf(float) #1
 
-define float @foosf4(float* nocapture readonly %a) #0 {
+define float @foosf4(ptr nocapture readonly %a) #0 {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %call = tail call float @nearbyintf(float %0) #2
   ret float %call
 
@@ -74,9 +74,9 @@ entry:
 
 declare float @nearbyintf(float) #1
 
-define float @foosf5(float* nocapture readonly %a) #0 {
+define float @foosf5(ptr nocapture readonly %a) #0 {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %mul = fmul float %0, undef
   ret float %mul
 
@@ -86,9 +86,9 @@ entry:
 ; CHECK-NOT:        lwu
 }
 
-define float @foosf6(float* nocapture readonly %a) #0 {
+define float @foosf6(ptr nocapture readonly %a) #0 {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %sub = fsub float %0, undef
   ret float %sub
 
@@ -98,9 +98,9 @@ entry:
 ; CHECK-NOT:        lwu
 }
 
-define float @foosf7(float* nocapture readonly %a) #0 {
+define float @foosf7(ptr nocapture readonly %a) #0 {
 entry:
-  %0 = load float, float* %a, align 4
+  %0 = load float, ptr %a, align 4
   %add = fadd float %0, undef
   ret float %add
 
@@ -110,11 +110,11 @@ entry:
 ; CHECK-NOT:        lwu
 }
 
-define float @foosf8(float* nocapture readonly %a) #0 {
+define float @foosf8(ptr nocapture readonly %a) #0 {
 entry:
   %b = alloca float, align 4
-  %b.0.b.0. = load volatile float, float* %b, align 4
-  %0 = load float, float* %a, align 4
+  %b.0.b.0. = load volatile float, ptr %b, align 4
+  %0 = load float, ptr %a, align 4
   %div = fdiv float %b.0.b.0., %0
   ret float %div
 
@@ -127,9 +127,9 @@ entry:
 define float @foosf9() #0 {
 entry:
   %b = alloca float, align 4
-  %b.0.b.0. = load volatile float, float* %b, align 4
+  %b.0.b.0. = load volatile float, ptr %b, align 4
   %conv = fpext float %b.0.b.0. to double
-  %b.0.b.0.3 = load volatile float, float* %b, align 4
+  %b.0.b.0.3 = load volatile float, ptr %b, align 4
   %conv1 = fpext float %b.0.b.0.3 to double
   %call = tail call double @pow(double %conv, double %conv1) #1
   %conv2 = fptrunc double %call to float
@@ -146,7 +146,7 @@ declare double @pow(double, double) #0
 define float @foosf10() #0 {
 entry:
   %a = alloca float, align 4
-  %a.0.a.0. = load volatile float, float* %a, align 4
+  %a.0.a.0. = load volatile float, ptr %a, align 4
   %conv = fpext float %a.0.a.0. to double
   %call = tail call double @sin(double %conv) #1
   %conv1 = fptrunc double %call to float
@@ -163,7 +163,7 @@ declare double @sin(double) #0
 define float @foosf11() #0 {
 entry:
   %b = alloca float, align 4
-  %b.0.b.0. = load volatile float, float* %b, align 4
+  %b.0.b.0. = load volatile float, ptr %b, align 4
   %call = tail call float @ceilf(float %b.0.b.0.) #2
   ret float %call
 
@@ -179,8 +179,8 @@ define float @foosf12() #0 {
 entry:
   %b = alloca float, align 4
   %a = alloca float, align 4
-  %b.0.b.0. = load volatile float, float* %b, align 4
-  %a.0.a.0. = load volatile float, float* %a, align 4
+  %b.0.b.0. = load volatile float, ptr %b, align 4
+  %a.0.a.0. = load volatile float, ptr %a, align 4
   %call = tail call float @fmaxf(float %b.0.b.0., float %a.0.a.0.) #2
   ret float %call
 
@@ -196,8 +196,8 @@ define float @foosf13() #0 {
 entry:
   %b = alloca float, align 4
   %a = alloca float, align 4
-  %b.0.b.0. = load volatile float, float* %b, align 4
-  %a.0.a.0. = load volatile float, float* %a, align 4
+  %b.0.b.0. = load volatile float, ptr %b, align 4
+  %a.0.a.0. = load volatile float, ptr %a, align 4
   %call = tail call float @fminf(float %b.0.b.0., float %a.0.a.0.) #2
   ret float %call
 

diff  --git a/llvm/test/CodeGen/Mips/mips64sinttofpsf.ll b/llvm/test/CodeGen/Mips/mips64sinttofpsf.ll
index 87f803ecf552f..12c353d72c56c 100644
--- a/llvm/test/CodeGen/Mips/mips64sinttofpsf.ll
+++ b/llvm/test/CodeGen/Mips/mips64sinttofpsf.ll
@@ -4,8 +4,8 @@
 define double @foo() #0 {
 entry:
   %x = alloca i32, align 4
-  store volatile i32 -32, i32* %x, align 4
-  %0 = load volatile i32, i32* %x, align 4
+  store volatile i32 -32, ptr %x, align 4
+  %0 = load volatile i32, ptr %x, align 4
   %conv = sitofp i32 %0 to double
   ret double %conv
 

diff  --git a/llvm/test/CodeGen/Mips/mipslopat.ll b/llvm/test/CodeGen/Mips/mipslopat.ll
index 63b68c1762b29..c6143844a8d4d 100644
--- a/llvm/test/CodeGen/Mips/mipslopat.ll
+++ b/llvm/test/CodeGen/Mips/mipslopat.ll
@@ -1,19 +1,17 @@
 ; This test does not check the machine code output.   
 ; RUN: llc -march=mips < %s 
 
-@stat_vol_ptr_int = internal global i32* null, align 4
-@stat_ptr_vol_int = internal global i32* null, align 4
+@stat_vol_ptr_int = internal global ptr null, align 4
+@stat_ptr_vol_int = internal global ptr null, align 4
 
 define void @simple_vol_file() nounwind {
 entry:
-  %tmp = load volatile i32*, i32** @stat_vol_ptr_int, align 4
-  %0 = bitcast i32* %tmp to i8*
-  call void @llvm.prefetch(i8* %0, i32 0, i32 0, i32 1)
-  %tmp1 = load i32*, i32** @stat_ptr_vol_int, align 4
-  %1 = bitcast i32* %tmp1 to i8*
-  call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1)
+  %tmp = load volatile ptr, ptr @stat_vol_ptr_int, align 4
+  call void @llvm.prefetch(ptr %tmp, i32 0, i32 0, i32 1)
+  %tmp1 = load ptr, ptr @stat_ptr_vol_int, align 4
+  call void @llvm.prefetch(ptr %tmp1, i32 0, i32 0, i32 1)
   ret void
 }
 
-declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind
+declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) nounwind
 

diff  --git a/llvm/test/CodeGen/Mips/misha.ll b/llvm/test/CodeGen/Mips/misha.ll
index bedea9de5f927..b8e45c633aaa4 100644
--- a/llvm/test/CodeGen/Mips/misha.ll
+++ b/llvm/test/CodeGen/Mips/misha.ll
@@ -1,6 +1,6 @@
 ; RUN: llc  -march=mipsel -mattr=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16
 
-define i32 @sumc(i8* nocapture %to, i8* nocapture %from, i32) nounwind {
+define i32 @sumc(ptr nocapture %to, ptr nocapture %from, i32) nounwind {
 entry:
   %sext = shl i32 %0, 16
   %conv = ashr exact i32 %sext, 16
@@ -8,20 +8,20 @@ entry:
   br i1 %cmp8, label %for.end, label %for.body.lr.ph
 
 for.body.lr.ph:                                   ; preds = %entry
-  %.pre = load i8, i8* %to, align 1
+  %.pre = load i8, ptr %to, align 1
   br label %for.body
 
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %1 = phi i8 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
   %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
-  %from.addr.09 = phi i8* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
-  %incdec.ptr = getelementptr inbounds i8, i8* %from.addr.09, i32 1
-  %2 = load i8, i8* %from.addr.09, align 1
+  %from.addr.09 = phi ptr [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+  %incdec.ptr = getelementptr inbounds i8, ptr %from.addr.09, i32 1
+  %2 = load i8, ptr %from.addr.09, align 1
   %conv27 = zext i8 %2 to i32
   %conv36 = zext i8 %1 to i32
   %add = add nsw i32 %conv36, %conv27
   %conv4 = trunc i32 %add to i8
-  store i8 %conv4, i8* %to, align 1
+  store i8 %conv4, ptr %to, align 1
   %inc = add nsw i32 %i.010, 1
   %cmp = icmp eq i32 %inc, %conv
   br i1 %cmp, label %for.end, label %for.body
@@ -36,7 +36,7 @@ for.end:                                          ; preds = %for.body, %entry
   ret i32 undef
 }
 
-define i32 @sum(i16* nocapture %to, i16* nocapture %from, i32) nounwind {
+define i32 @sum(ptr nocapture %to, ptr nocapture %from, i32) nounwind {
 entry:
   %sext = shl i32 %0, 16
   %conv = ashr exact i32 %sext, 16
@@ -44,20 +44,20 @@ entry:
   br i1 %cmp8, label %for.end, label %for.body.lr.ph
 
 for.body.lr.ph:                                   ; preds = %entry
-  %.pre = load i16, i16* %to, align 2
+  %.pre = load i16, ptr %to, align 2
   br label %for.body
 
 for.body:                                         ; preds = %for.body.lr.ph, %for.body
   %1 = phi i16 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
   %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
-  %from.addr.09 = phi i16* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
-  %incdec.ptr = getelementptr inbounds i16, i16* %from.addr.09, i32 1
-  %2 = load i16, i16* %from.addr.09, align 2
+  %from.addr.09 = phi ptr [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
+  %incdec.ptr = getelementptr inbounds i16, ptr %from.addr.09, i32 1
+  %2 = load i16, ptr %from.addr.09, align 2
   %conv27 = zext i16 %2 to i32
   %conv36 = zext i16 %1 to i32
   %add = add nsw i32 %conv36, %conv27
   %conv4 = trunc i32 %add to i16
-  store i16 %conv4, i16* %to, align 2
+  store i16 %conv4, ptr %to, align 2
   %inc = add nsw i32 %i.010, 1
   %cmp = icmp eq i32 %inc, %conv
   br i1 %cmp, label %for.end, label %for.body

diff  --git a/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll b/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll
index 1ae6fc15d9545..8a927e44c4ef5 100644
--- a/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll
+++ b/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll
@@ -142,7 +142,7 @@
 
 define double @test_ldc1() {
 entry:
-  %0 = load double, double* @g0, align 8
+  %0 = load double, ptr @g0, align 8
   ret double %0
 }
 
@@ -227,7 +227,7 @@ entry:
 
 define void @test_sdc1(double %a) {
 entry:
-  store double %a, double* @g0, align 8
+  store double %a, ptr @g0, align 8
   ret void
 }
 
@@ -278,10 +278,10 @@ entry:
 ; MM-STATIC-PIC: addu16  $[[R1:[0-9]+]], $4, $[[R0]]
 ; MM-STATIC-PIC: ldc1    $f0, 0($[[R1]])
 
-define double @test_ldxc1(double* nocapture readonly %a, i32 %i) {
+define double @test_ldxc1(ptr nocapture readonly %a, i32 %i) {
 entry:
-  %arrayidx = getelementptr inbounds double, double* %a, i32 %i
-  %0 = load double, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %a, i32 %i
+  %0 = load double, ptr %arrayidx, align 8
   ret double %0
 }
 
@@ -326,9 +326,9 @@ entry:
 ; MM-STATIC-PIC: addu16  $[[R1:[0-9]+]], $6, $[[R0]]
 ; MM-STATIC-PIC: sdc1    $f12, 0($[[R1]])
 
-define void @test_sdxc1(double %b, double* nocapture %a, i32 %i) {
+define void @test_sdxc1(double %b, ptr nocapture %a, i32 %i) {
 entry:
-  %arrayidx = getelementptr inbounds double, double* %a, i32 %i
-  store double %b, double* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr %a, i32 %i
+  store double %b, ptr %arrayidx, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/2r.ll b/llvm/test/CodeGen/Mips/msa/2r.ll
index 6f8d574202cdf..b7ea3fc11c6e3 100644
--- a/llvm/test/CodeGen/Mips/msa/2r.ll
+++ b/llvm/test/CodeGen/Mips/msa/2r.ll
@@ -8,9 +8,9 @@
 
 define void @llvm_mips_nloc_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nloc_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_nloc_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.nloc.b(<16 x i8> %0)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_nloc_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_nloc_b_RES
   ret void
 }
 
@@ -29,9 +29,9 @@ declare <16 x i8> @llvm.mips.nloc.b(<16 x i8>) nounwind
 
 define void @llvm_mips_nloc_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nloc_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_nloc_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.nloc.h(<8 x i16> %0)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_nloc_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_nloc_h_RES
   ret void
 }
 
@@ -50,9 +50,9 @@ declare <8 x i16> @llvm.mips.nloc.h(<8 x i16>) nounwind
 
 define void @llvm_mips_nloc_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nloc_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_nloc_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.nloc.w(<4 x i32> %0)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_nloc_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_nloc_w_RES
   ret void
 }
 
@@ -71,9 +71,9 @@ declare <4 x i32> @llvm.mips.nloc.w(<4 x i32>) nounwind
 
 define void @llvm_mips_nloc_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nloc_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_nloc_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.nloc.d(<2 x i64> %0)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_nloc_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_nloc_d_RES
   ret void
 }
 
@@ -92,9 +92,9 @@ declare <2 x i64> @llvm.mips.nloc.d(<2 x i64>) nounwind
 
 define void @llvm_mips_nlzc_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nlzc_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_nlzc_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.nlzc.b(<16 x i8> %0)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_nlzc_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_nlzc_b_RES
   ret void
 }
 
@@ -113,9 +113,9 @@ declare <16 x i8> @llvm.mips.nlzc.b(<16 x i8>) nounwind
 
 define void @llvm_mips_nlzc_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nlzc_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_nlzc_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.nlzc.h(<8 x i16> %0)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_nlzc_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_nlzc_h_RES
   ret void
 }
 
@@ -134,9 +134,9 @@ declare <8 x i16> @llvm.mips.nlzc.h(<8 x i16>) nounwind
 
 define void @llvm_mips_nlzc_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nlzc_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_nlzc_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.nlzc.w(<4 x i32> %0)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_nlzc_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_nlzc_w_RES
   ret void
 }
 
@@ -155,9 +155,9 @@ declare <4 x i32> @llvm.mips.nlzc.w(<4 x i32>) nounwind
 
 define void @llvm_mips_nlzc_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nlzc_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_nlzc_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.nlzc.d(<2 x i64> %0)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_nlzc_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_nlzc_d_RES
   ret void
 }
 
@@ -176,9 +176,9 @@ declare <2 x i64> @llvm.mips.nlzc.d(<2 x i64>) nounwind
 
 define void @llvm_mips_pcnt_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pcnt_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_pcnt_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.pcnt.b(<16 x i8> %0)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_pcnt_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_pcnt_b_RES
   ret void
 }
 
@@ -197,9 +197,9 @@ declare <16 x i8> @llvm.mips.pcnt.b(<16 x i8>) nounwind
 
 define void @llvm_mips_pcnt_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pcnt_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_pcnt_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.pcnt.h(<8 x i16> %0)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_pcnt_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_pcnt_h_RES
   ret void
 }
 
@@ -218,9 +218,9 @@ declare <8 x i16> @llvm.mips.pcnt.h(<8 x i16>) nounwind
 
 define void @llvm_mips_pcnt_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pcnt_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_pcnt_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.pcnt.w(<4 x i32> %0)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_pcnt_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_pcnt_w_RES
   ret void
 }
 
@@ -239,9 +239,9 @@ declare <4 x i32> @llvm.mips.pcnt.w(<4 x i32>) nounwind
 
 define void @llvm_mips_pcnt_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pcnt_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_pcnt_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.pcnt.d(<2 x i64> %0)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_pcnt_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_pcnt_d_RES
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll b/llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll
index 54936331072c0..f369a9ebca384 100644
--- a/llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll
+++ b/llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll
@@ -15,9 +15,9 @@
 
 define void @llvm_mips_fill_b_test() nounwind {
 entry:
-  %0 = load i32, i32* @llvm_mips_fill_b_ARG1
+  %0 = load i32, ptr @llvm_mips_fill_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.fill.b(i32 %0)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_fill_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_fill_b_RES
   ret void
 }
 
@@ -35,9 +35,9 @@ declare <16 x i8> @llvm.mips.fill.b(i32) nounwind
 
 define void @llvm_mips_fill_h_test() nounwind {
 entry:
-  %0 = load i32, i32* @llvm_mips_fill_h_ARG1
+  %0 = load i32, ptr @llvm_mips_fill_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.fill.h(i32 %0)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_fill_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_fill_h_RES
   ret void
 }
 
@@ -55,9 +55,9 @@ declare <8 x i16> @llvm.mips.fill.h(i32) nounwind
 
 define void @llvm_mips_fill_w_test() nounwind {
 entry:
-  %0 = load i32, i32* @llvm_mips_fill_w_ARG1
+  %0 = load i32, ptr @llvm_mips_fill_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.fill.w(i32 %0)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_fill_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_fill_w_RES
   ret void
 }
 
@@ -75,9 +75,9 @@ declare <4 x i32> @llvm.mips.fill.w(i32) nounwind
 
 define void @llvm_mips_fill_d_test() nounwind {
 entry:
-  %0 = load i64, i64* @llvm_mips_fill_d_ARG1
+  %0 = load i64, ptr @llvm_mips_fill_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.fill.d(i64 %0)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_fill_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_fill_d_RES
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/msa/2rf.ll b/llvm/test/CodeGen/Mips/msa/2rf.ll
index bb540c4144089..6cdf5b7b8b423 100644
--- a/llvm/test/CodeGen/Mips/msa/2rf.ll
+++ b/llvm/test/CodeGen/Mips/msa/2rf.ll
@@ -8,9 +8,9 @@
 
 define void @llvm_mips_flog2_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_flog2_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_flog2_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.flog2.w(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
+  store <4 x float> %1, ptr @llvm_mips_flog2_w_RES
   ret void
 }
 
@@ -29,9 +29,9 @@ declare <4 x float> @llvm.mips.flog2.w(<4 x float>) nounwind
 
 define void @llvm_mips_flog2_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_flog2_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.flog2.d(<2 x double> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
+  store <2 x double> %1, ptr @llvm_mips_flog2_d_RES
   ret void
 }
 
@@ -47,9 +47,9 @@ declare <2 x double> @llvm.mips.flog2.d(<2 x double>) nounwind
 
 define void @flog2_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_flog2_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_flog2_w_ARG1
   %1 = tail call <4 x float> @llvm.log2.v4f32(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
+  store <4 x float> %1, ptr @llvm_mips_flog2_w_RES
   ret void
 }
 
@@ -65,9 +65,9 @@ declare <4 x float> @llvm.log2.v4f32(<4 x float> %val)
 
 define void @flog2_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_flog2_d_ARG1
   %1 = tail call <2 x double> @llvm.log2.v2f64(<2 x double> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
+  store <2 x double> %1, ptr @llvm_mips_flog2_d_RES
   ret void
 }
 
@@ -86,9 +86,9 @@ declare <2 x double> @llvm.log2.v2f64(<2 x double> %val)
 
 define void @llvm_mips_frint_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_frint_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.frint.w(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
+  store <4 x float> %1, ptr @llvm_mips_frint_w_RES
   ret void
 }
 
@@ -107,9 +107,9 @@ declare <4 x float> @llvm.mips.frint.w(<4 x float>) nounwind
 
 define void @llvm_mips_frint_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_frint_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.frint.d(<2 x double> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
+  store <2 x double> %1, ptr @llvm_mips_frint_d_RES
   ret void
 }
 
@@ -125,9 +125,9 @@ declare <2 x double> @llvm.mips.frint.d(<2 x double>) nounwind
 
 define void @frint_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_frint_w_ARG1
   %1 = tail call <4 x float> @llvm.rint.v4f32(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
+  store <4 x float> %1, ptr @llvm_mips_frint_w_RES
   ret void
 }
 
@@ -143,9 +143,9 @@ declare <4 x float> @llvm.rint.v4f32(<4 x float>) nounwind
 
 define void @frint_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_frint_d_ARG1
   %1 = tail call <2 x double> @llvm.rint.v2f64(<2 x double> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
+  store <2 x double> %1, ptr @llvm_mips_frint_d_RES
   ret void
 }
 
@@ -164,9 +164,9 @@ declare <2 x double> @llvm.rint.v2f64(<2 x double>) nounwind
 
 define void @llvm_mips_frcp_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_frcp_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_frcp_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.frcp.w(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_frcp_w_RES
+  store <4 x float> %1, ptr @llvm_mips_frcp_w_RES
   ret void
 }
 
@@ -185,9 +185,9 @@ declare <4 x float> @llvm.mips.frcp.w(<4 x float>) nounwind
 
 define void @llvm_mips_frcp_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_frcp_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_frcp_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.frcp.d(<2 x double> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_frcp_d_RES
+  store <2 x double> %1, ptr @llvm_mips_frcp_d_RES
   ret void
 }
 
@@ -206,9 +206,9 @@ declare <2 x double> @llvm.mips.frcp.d(<2 x double>) nounwind
 
 define void @llvm_mips_frsqrt_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_frsqrt_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_frsqrt_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.frsqrt.w(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_frsqrt_w_RES
+  store <4 x float> %1, ptr @llvm_mips_frsqrt_w_RES
   ret void
 }
 
@@ -227,9 +227,9 @@ declare <4 x float> @llvm.mips.frsqrt.w(<4 x float>) nounwind
 
 define void @llvm_mips_frsqrt_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_frsqrt_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_frsqrt_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.frsqrt.d(<2 x double> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_frsqrt_d_RES
+  store <2 x double> %1, ptr @llvm_mips_frsqrt_d_RES
   ret void
 }
 
@@ -248,9 +248,9 @@ declare <2 x double> @llvm.mips.frsqrt.d(<2 x double>) nounwind
 
 define void @llvm_mips_fsqrt_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_fsqrt_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.fsqrt.w(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
+  store <4 x float> %1, ptr @llvm_mips_fsqrt_w_RES
   ret void
 }
 
@@ -269,9 +269,9 @@ declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>) nounwind
 
 define void @llvm_mips_fsqrt_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_fsqrt_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.fsqrt.d(<2 x double> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
+  store <2 x double> %1, ptr @llvm_mips_fsqrt_d_RES
   ret void
 }
 
@@ -287,9 +287,9 @@ declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>) nounwind
 
 define void @fsqrt_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_fsqrt_w_ARG1
   %1 = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
+  store <4 x float> %1, ptr @llvm_mips_fsqrt_w_RES
   ret void
 }
 
@@ -305,9 +305,9 @@ declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind
 
 define void @fsqrt_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_fsqrt_d_ARG1
   %1 = tail call <2 x double> @llvm.sqrt.v2f64(<2 x double> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
+  store <2 x double> %1, ptr @llvm_mips_fsqrt_d_RES
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/msa/2rf_exup.ll b/llvm/test/CodeGen/Mips/msa/2rf_exup.ll
index 20560fb3f3eb9..f8bdf866c5f82 100644
--- a/llvm/test/CodeGen/Mips/msa/2rf_exup.ll
+++ b/llvm/test/CodeGen/Mips/msa/2rf_exup.ll
@@ -9,9 +9,9 @@
 
 define void @llvm_mips_fexupl_w_test() nounwind {
 entry:
-  %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupl_w_ARG1
+  %0 = load <8 x half>, ptr @llvm_mips_fexupl_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.fexupl.w(<8 x half> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_fexupl_w_RES
+  store <4 x float> %1, ptr @llvm_mips_fexupl_w_RES
   ret void
 }
 
@@ -28,9 +28,9 @@ declare <4 x float> @llvm.mips.fexupl.w(<8 x half>) nounwind
 
 define void @llvm_mips_fexupl_d_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupl_d_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_fexupl_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.fexupl.d(<4 x float> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_fexupl_d_RES
+  store <2 x double> %1, ptr @llvm_mips_fexupl_d_RES
   ret void
 }
 
@@ -47,9 +47,9 @@ declare <2 x double> @llvm.mips.fexupl.d(<4 x float>) nounwind
 
 define void @llvm_mips_fexupr_w_test() nounwind {
 entry:
-  %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupr_w_ARG1
+  %0 = load <8 x half>, ptr @llvm_mips_fexupr_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.fexupr.w(<8 x half> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_fexupr_w_RES
+  store <4 x float> %1, ptr @llvm_mips_fexupr_w_RES
   ret void
 }
 
@@ -66,9 +66,9 @@ declare <4 x float> @llvm.mips.fexupr.w(<8 x half>) nounwind
 
 define void @llvm_mips_fexupr_d_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupr_d_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_fexupr_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.fexupr.d(<4 x float> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_fexupr_d_RES
+  store <2 x double> %1, ptr @llvm_mips_fexupr_d_RES
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/msa/2rf_float_int.ll b/llvm/test/CodeGen/Mips/msa/2rf_float_int.ll
index 25381ed1eee29..da83b7eb180b7 100644
--- a/llvm/test/CodeGen/Mips/msa/2rf_float_int.ll
+++ b/llvm/test/CodeGen/Mips/msa/2rf_float_int.ll
@@ -9,9 +9,9 @@
 
 define void @llvm_mips_ffint_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_s_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_ffint_s_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.ffint.s.w(<4 x i32> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_ffint_s_w_RES
+  store <4 x float> %1, ptr @llvm_mips_ffint_s_w_RES
   ret void
 }
 
@@ -30,9 +30,9 @@ declare <4 x float> @llvm.mips.ffint.s.w(<4 x i32>) nounwind
 
 define void @llvm_mips_ffint_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_s_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_ffint_s_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.ffint.s.d(<2 x i64> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_ffint_s_d_RES
+  store <2 x double> %1, ptr @llvm_mips_ffint_s_d_RES
   ret void
 }
 
@@ -51,9 +51,9 @@ declare <2 x double> @llvm.mips.ffint.s.d(<2 x i64>) nounwind
 
 define void @llvm_mips_ffint_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_u_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_ffint_u_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.ffint.u.w(<4 x i32> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_ffint_u_w_RES
+  store <4 x float> %1, ptr @llvm_mips_ffint_u_w_RES
   ret void
 }
 
@@ -72,9 +72,9 @@ declare <4 x float> @llvm.mips.ffint.u.w(<4 x i32>) nounwind
 
 define void @llvm_mips_ffint_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_u_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_ffint_u_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.ffint.u.d(<2 x i64> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_ffint_u_d_RES
+  store <2 x double> %1, ptr @llvm_mips_ffint_u_d_RES
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/msa/2rf_fq.ll b/llvm/test/CodeGen/Mips/msa/2rf_fq.ll
index 0ee6386764396..2d773bfda17c8 100644
--- a/llvm/test/CodeGen/Mips/msa/2rf_fq.ll
+++ b/llvm/test/CodeGen/Mips/msa/2rf_fq.ll
@@ -9,9 +9,9 @@
 
 define void @llvm_mips_ffql_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffql_w_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_ffql_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.ffql.w(<8 x i16> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_ffql_w_RES
+  store <4 x float> %1, ptr @llvm_mips_ffql_w_RES
   ret void
 }
 
@@ -28,9 +28,9 @@ declare <4 x float> @llvm.mips.ffql.w(<8 x i16>) nounwind
 
 define void @llvm_mips_ffql_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffql_d_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_ffql_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.ffql.d(<4 x i32> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_ffql_d_RES
+  store <2 x double> %1, ptr @llvm_mips_ffql_d_RES
   ret void
 }
 
@@ -47,9 +47,9 @@ declare <2 x double> @llvm.mips.ffql.d(<4 x i32>) nounwind
 
 define void @llvm_mips_ffqr_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffqr_w_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_ffqr_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.ffqr.w(<8 x i16> %0)
-  store <4 x float> %1, <4 x float>* @llvm_mips_ffqr_w_RES
+  store <4 x float> %1, ptr @llvm_mips_ffqr_w_RES
   ret void
 }
 
@@ -66,9 +66,9 @@ declare <4 x float> @llvm.mips.ffqr.w(<8 x i16>) nounwind
 
 define void @llvm_mips_ffqr_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffqr_d_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_ffqr_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.ffqr.d(<4 x i32> %0)
-  store <2 x double> %1, <2 x double>* @llvm_mips_ffqr_d_RES
+  store <2 x double> %1, ptr @llvm_mips_ffqr_d_RES
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/msa/2rf_int_float.ll b/llvm/test/CodeGen/Mips/msa/2rf_int_float.ll
index 8aeafded89ea7..eeac8d4495716 100644
--- a/llvm/test/CodeGen/Mips/msa/2rf_int_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/2rf_int_float.ll
@@ -10,9 +10,9 @@
 
 define void @llvm_mips_fclass_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fclass_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_fclass_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.fclass.w(<4 x float> %0)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_fclass_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_fclass_w_RES
   ret void
 }
 
@@ -31,9 +31,9 @@ declare <4 x i32> @llvm.mips.fclass.w(<4 x float>) nounwind
 
 define void @llvm_mips_fclass_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fclass_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_fclass_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.fclass.d(<2 x double> %0)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_fclass_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_fclass_d_RES
   ret void
 }
 
@@ -52,9 +52,9 @@ declare <2 x i64> @llvm.mips.fclass.d(<2 x double>) nounwind
 
 define void @llvm_mips_ftrunc_s_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_s_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_ftrunc_s_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float> %0)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_s_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_ftrunc_s_w_RES
   ret void
 }
 
@@ -73,9 +73,9 @@ declare <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float>) nounwind
 
 define void @llvm_mips_ftrunc_s_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_s_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_ftrunc_s_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double> %0)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_s_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_ftrunc_s_d_RES
   ret void
 }
 
@@ -94,9 +94,9 @@ declare <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double>) nounwind
 
 define void @llvm_mips_ftrunc_u_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_u_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_ftrunc_u_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float> %0)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_u_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_ftrunc_u_w_RES
   ret void
 }
 
@@ -115,9 +115,9 @@ declare <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float>) nounwind
 
 define void @llvm_mips_ftrunc_u_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_u_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_ftrunc_u_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double> %0)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_u_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_ftrunc_u_d_RES
   ret void
 }
 
@@ -136,9 +136,9 @@ declare <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double>) nounwind
 
 define void @llvm_mips_ftint_s_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_s_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_ftint_s_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.ftint.s.w(<4 x float> %0)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_s_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_ftint_s_w_RES
   ret void
 }
 
@@ -157,9 +157,9 @@ declare <4 x i32> @llvm.mips.ftint.s.w(<4 x float>) nounwind
 
 define void @llvm_mips_ftint_s_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_s_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_ftint_s_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.ftint.s.d(<2 x double> %0)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_s_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_ftint_s_d_RES
   ret void
 }
 
@@ -178,9 +178,9 @@ declare <2 x i64> @llvm.mips.ftint.s.d(<2 x double>) nounwind
 
 define void @llvm_mips_ftint_u_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_u_w_ARG1
+  %0 = load <4 x float>, ptr @llvm_mips_ftint_u_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.ftint.u.w(<4 x float> %0)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_u_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_ftint_u_w_RES
   ret void
 }
 
@@ -199,9 +199,9 @@ declare <4 x i32> @llvm.mips.ftint.u.w(<4 x float>) nounwind
 
 define void @llvm_mips_ftint_u_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_u_d_ARG1
+  %0 = load <2 x double>, ptr @llvm_mips_ftint_u_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.ftint.u.d(<2 x double> %0)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_u_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_ftint_u_d_RES
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/msa/2rf_tq.ll b/llvm/test/CodeGen/Mips/msa/2rf_tq.ll
index 1066c3766e934..110da06777813 100644
--- a/llvm/test/CodeGen/Mips/msa/2rf_tq.ll
+++ b/llvm/test/CodeGen/Mips/msa/2rf_tq.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_ftq_h_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_ftq_h_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_ftq_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ftq.h(<4 x float> %0, <4 x float> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ftq_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_ftq_h_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <8 x i16> @llvm.mips.ftq.h(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_ftq_w_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_ftq_w_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_ftq_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ftq.w(<2 x double> %0, <2 x double> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ftq_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_ftq_w_RES
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/msa/3r-a.ll b/llvm/test/CodeGen/Mips/msa/3r-a.ll
index 933c4ed6946d4..31646350b6802 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-a.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-a.ll
@@ -15,10 +15,10 @@
 
 define void @llvm_mips_add_a_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_add_a_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_add_a_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.add.a.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_add_a_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_add_a_b_RES
   ret void
 }
 
@@ -40,10 +40,10 @@ declare <16 x i8> @llvm.mips.add.a.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_add_a_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_add_a_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_add_a_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.add.a.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_add_a_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_add_a_h_RES
   ret void
 }
 
@@ -65,10 +65,10 @@ declare <8 x i16> @llvm.mips.add.a.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_add_a_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_add_a_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_add_a_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.add.a.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_add_a_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_add_a_w_RES
   ret void
 }
 
@@ -90,10 +90,10 @@ declare <4 x i32> @llvm.mips.add.a.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_add_a_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_add_a_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_add_a_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.add.a.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_add_a_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_add_a_d_RES
   ret void
 }
 
@@ -115,10 +115,10 @@ declare <2 x i64> @llvm.mips.add.a.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_adds_a_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_adds_a_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_adds_a_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.adds.a.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_a_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_adds_a_b_RES
   ret void
 }
 
@@ -140,10 +140,10 @@ declare <16 x i8> @llvm.mips.adds.a.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_adds_a_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_adds_a_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_adds_a_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.adds.a.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_a_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_adds_a_h_RES
   ret void
 }
 
@@ -165,10 +165,10 @@ declare <8 x i16> @llvm.mips.adds.a.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_adds_a_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_adds_a_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_adds_a_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.adds.a.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_a_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_adds_a_w_RES
   ret void
 }
 
@@ -190,10 +190,10 @@ declare <4 x i32> @llvm.mips.adds.a.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_adds_a_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_adds_a_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_adds_a_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.adds.a.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_a_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_adds_a_d_RES
   ret void
 }
 
@@ -215,10 +215,10 @@ declare <2 x i64> @llvm.mips.adds.a.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_adds_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_adds_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_adds_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.adds.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_adds_s_b_RES
   ret void
 }
 
@@ -240,10 +240,10 @@ declare <16 x i8> @llvm.mips.adds.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_adds_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_adds_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_adds_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.adds.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_adds_s_h_RES
   ret void
 }
 
@@ -265,10 +265,10 @@ declare <8 x i16> @llvm.mips.adds.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_adds_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_adds_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_adds_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.adds.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_adds_s_w_RES
   ret void
 }
 
@@ -290,10 +290,10 @@ declare <4 x i32> @llvm.mips.adds.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_adds_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_adds_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_adds_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.adds.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_adds_s_d_RES
   ret void
 }
 
@@ -315,10 +315,10 @@ declare <2 x i64> @llvm.mips.adds.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_adds_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_adds_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_adds_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.adds.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_adds_u_b_RES
   ret void
 }
 
@@ -340,10 +340,10 @@ declare <16 x i8> @llvm.mips.adds.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_adds_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_adds_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_adds_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.adds.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_adds_u_h_RES
   ret void
 }
 
@@ -365,10 +365,10 @@ declare <8 x i16> @llvm.mips.adds.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_adds_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_adds_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_adds_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.adds.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_adds_u_w_RES
   ret void
 }
 
@@ -390,10 +390,10 @@ declare <4 x i32> @llvm.mips.adds.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_adds_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_adds_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_adds_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.adds.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_adds_u_d_RES
   ret void
 }
 
@@ -415,10 +415,10 @@ declare <2 x i64> @llvm.mips.adds.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_addv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_addv_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_addv_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_addv_b_RES
   ret void
 }
 
@@ -440,10 +440,10 @@ declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_addv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_addv_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_addv_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_addv_h_RES
   ret void
 }
 
@@ -465,10 +465,10 @@ declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_addv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_addv_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_addv_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_addv_w_RES
   ret void
 }
 
@@ -490,10 +490,10 @@ declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_addv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_addv_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_addv_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_addv_d_RES
   ret void
 }
 
@@ -512,10 +512,10 @@ declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @addv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_addv_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_addv_b_ARG2
   %2 = add <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_addv_b_RES
   ret void
 }
 
@@ -532,10 +532,10 @@ entry:
 
 define void @addv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_addv_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_addv_h_ARG2
   %2 = add <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_addv_h_RES
   ret void
 }
 
@@ -552,10 +552,10 @@ entry:
 
 define void @addv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_addv_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_addv_w_ARG2
   %2 = add <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_addv_w_RES
   ret void
 }
 
@@ -572,10 +572,10 @@ entry:
 
 define void @addv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_addv_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_addv_d_ARG2
   %2 = add <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_addv_d_RES
   ret void
 }
 
@@ -595,10 +595,10 @@ entry:
 
 define void @llvm_mips_asub_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_asub_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_asub_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.asub.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_asub_s_b_RES
   ret void
 }
 
@@ -620,10 +620,10 @@ declare <16 x i8> @llvm.mips.asub.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_asub_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_asub_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_asub_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.asub.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_asub_s_h_RES
   ret void
 }
 
@@ -645,10 +645,10 @@ declare <8 x i16> @llvm.mips.asub.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_asub_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_asub_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_asub_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.asub.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_asub_s_w_RES
   ret void
 }
 
@@ -670,10 +670,10 @@ declare <4 x i32> @llvm.mips.asub.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_asub_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_asub_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_asub_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.asub.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_asub_s_d_RES
   ret void
 }
 
@@ -695,10 +695,10 @@ declare <2 x i64> @llvm.mips.asub.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_asub_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_asub_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_asub_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.asub.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_asub_u_b_RES
   ret void
 }
 
@@ -720,10 +720,10 @@ declare <16 x i8> @llvm.mips.asub.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_asub_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_asub_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_asub_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.asub.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_asub_u_h_RES
   ret void
 }
 
@@ -745,10 +745,10 @@ declare <8 x i16> @llvm.mips.asub.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_asub_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_asub_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_asub_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.asub.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_asub_u_w_RES
   ret void
 }
 
@@ -770,10 +770,10 @@ declare <4 x i32> @llvm.mips.asub.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_asub_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_asub_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_asub_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.asub.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_asub_u_d_RES
   ret void
 }
 
@@ -795,10 +795,10 @@ declare <2 x i64> @llvm.mips.asub.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_ave_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_ave_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_ave_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ave.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_ave_s_b_RES
   ret void
 }
 
@@ -820,10 +820,10 @@ declare <16 x i8> @llvm.mips.ave.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ave_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_ave_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_ave_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ave.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_ave_s_h_RES
   ret void
 }
 
@@ -845,10 +845,10 @@ declare <8 x i16> @llvm.mips.ave.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ave_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_ave_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_ave_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ave.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_ave_s_w_RES
   ret void
 }
 
@@ -870,10 +870,10 @@ declare <4 x i32> @llvm.mips.ave.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ave_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_ave_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_ave_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ave.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_ave_s_d_RES
   ret void
 }
 
@@ -895,10 +895,10 @@ declare <2 x i64> @llvm.mips.ave.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_ave_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_ave_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_ave_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ave.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_ave_u_b_RES
   ret void
 }
 
@@ -920,10 +920,10 @@ declare <16 x i8> @llvm.mips.ave.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ave_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_ave_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_ave_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ave.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_ave_u_h_RES
   ret void
 }
 
@@ -945,10 +945,10 @@ declare <8 x i16> @llvm.mips.ave.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ave_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_ave_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_ave_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ave.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_ave_u_w_RES
   ret void
 }
 
@@ -970,10 +970,10 @@ declare <4 x i32> @llvm.mips.ave.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ave_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_ave_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_ave_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ave.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_ave_u_d_RES
   ret void
 }
 
@@ -995,10 +995,10 @@ declare <2 x i64> @llvm.mips.ave.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_aver_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_aver_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_aver_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.aver.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_aver_s_b_RES
   ret void
 }
 
@@ -1020,10 +1020,10 @@ declare <16 x i8> @llvm.mips.aver.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_aver_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_aver_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_aver_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.aver.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_aver_s_h_RES
   ret void
 }
 
@@ -1045,10 +1045,10 @@ declare <8 x i16> @llvm.mips.aver.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_aver_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_aver_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_aver_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.aver.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_aver_s_w_RES
   ret void
 }
 
@@ -1070,10 +1070,10 @@ declare <4 x i32> @llvm.mips.aver.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_aver_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_aver_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_aver_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.aver.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_aver_s_d_RES
   ret void
 }
 
@@ -1095,10 +1095,10 @@ declare <2 x i64> @llvm.mips.aver.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_aver_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_aver_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_aver_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.aver.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_aver_u_b_RES
   ret void
 }
 
@@ -1120,10 +1120,10 @@ declare <16 x i8> @llvm.mips.aver.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_aver_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_aver_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_aver_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.aver.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_aver_u_h_RES
   ret void
 }
 
@@ -1145,10 +1145,10 @@ declare <8 x i16> @llvm.mips.aver.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_aver_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_aver_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_aver_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.aver.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_aver_u_w_RES
   ret void
 }
 
@@ -1170,10 +1170,10 @@ declare <4 x i32> @llvm.mips.aver.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_aver_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_aver_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_aver_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.aver.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_aver_u_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r-b.ll b/llvm/test/CodeGen/Mips/msa/3r-b.ll
index 114b0ffb8b4a9..f824a6527d72d 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-b.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-b.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_bclr_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bclr_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bclr_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_bclr_b_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <16 x i8> @llvm.mips.bclr.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_bclr_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_bclr_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_bclr_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_bclr_h_RES
   ret void
 }
 
@@ -54,10 +54,10 @@ declare <8 x i16> @llvm.mips.bclr.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_bclr_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_bclr_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_bclr_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_bclr_w_RES
   ret void
 }
 
@@ -76,10 +76,10 @@ declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_bclr_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_bclr_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_bclr_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_bclr_d_RES
   ret void
 }
 
@@ -99,11 +99,11 @@ declare <2 x i64> @llvm.mips.bclr.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_binsl_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_binsl_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_binsl_b_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_binsl_b_ARG3
   %3 = tail call <16 x i8> @llvm.mips.binsl.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* @llvm_mips_binsl_b_RES
+  store <16 x i8> %3, ptr @llvm_mips_binsl_b_RES
   ret void
 }
 
@@ -127,11 +127,11 @@ declare <16 x i8> @llvm.mips.binsl.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_binsl_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_binsl_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_binsl_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_binsl_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.binsl.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_binsl_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_binsl_h_RES
   ret void
 }
 
@@ -155,11 +155,11 @@ declare <8 x i16> @llvm.mips.binsl.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_binsl_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_binsl_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_binsl_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_binsl_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.binsl.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_binsl_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_binsl_w_RES
   ret void
 }
 
@@ -183,11 +183,11 @@ declare <4 x i32> @llvm.mips.binsl.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_binsl_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG2
-  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_binsl_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_binsl_d_ARG2
+  %2 = load <2 x i64>, ptr @llvm_mips_binsl_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.binsl.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* @llvm_mips_binsl_d_RES
+  store <2 x i64> %3, ptr @llvm_mips_binsl_d_RES
   ret void
 }
 
@@ -211,11 +211,11 @@ declare <2 x i64> @llvm.mips.binsl.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_binsr_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_binsr_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_binsr_b_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_binsr_b_ARG3
   %3 = tail call <16 x i8> @llvm.mips.binsr.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* @llvm_mips_binsr_b_RES
+  store <16 x i8> %3, ptr @llvm_mips_binsr_b_RES
   ret void
 }
 
@@ -239,11 +239,11 @@ declare <16 x i8> @llvm.mips.binsr.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_binsr_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_binsr_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_binsr_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_binsr_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.binsr.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_binsr_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_binsr_h_RES
   ret void
 }
 
@@ -267,11 +267,11 @@ declare <8 x i16> @llvm.mips.binsr.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_binsr_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_binsr_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_binsr_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_binsr_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.binsr.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_binsr_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_binsr_w_RES
   ret void
 }
 
@@ -295,11 +295,11 @@ declare <4 x i32> @llvm.mips.binsr.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_binsr_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG2
-  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_binsr_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_binsr_d_ARG2
+  %2 = load <2 x i64>, ptr @llvm_mips_binsr_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.binsr.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* @llvm_mips_binsr_d_RES
+  store <2 x i64> %3, ptr @llvm_mips_binsr_d_RES
   ret void
 }
 
@@ -322,10 +322,10 @@ declare <2 x i64> @llvm.mips.binsr.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_bneg_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bneg_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bneg_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_bneg_b_RES
   ret void
 }
 
@@ -344,10 +344,10 @@ declare <16 x i8> @llvm.mips.bneg.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_bneg_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_bneg_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_bneg_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_bneg_h_RES
   ret void
 }
 
@@ -366,10 +366,10 @@ declare <8 x i16> @llvm.mips.bneg.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_bneg_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_bneg_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_bneg_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_bneg_w_RES
   ret void
 }
 
@@ -388,10 +388,10 @@ declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_bneg_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_bneg_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_bneg_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_bneg_d_RES
   ret void
 }
 
@@ -410,10 +410,10 @@ declare <2 x i64> @llvm.mips.bneg.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_bset_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bset_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bset_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_bset_b_RES
   ret void
 }
 
@@ -432,10 +432,10 @@ declare <16 x i8> @llvm.mips.bset.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_bset_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_bset_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_bset_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_bset_h_RES
   ret void
 }
 
@@ -454,10 +454,10 @@ declare <8 x i16> @llvm.mips.bset.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_bset_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_bset_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_bset_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_bset_w_RES
   ret void
 }
 
@@ -476,10 +476,10 @@ declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_bset_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_bset_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_bset_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_bset_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r-c.ll b/llvm/test/CodeGen/Mips/msa/3r-c.ll
index 7c3fc90ff3c1b..8af06b3f20bd4 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-c.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-c.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_ceq_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_ceq_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_ceq_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ceq.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ceq_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_ceq_b_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <16 x i8> @llvm.mips.ceq.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ceq_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_ceq_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_ceq_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ceq.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ceq_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_ceq_h_RES
   ret void
 }
 
@@ -54,10 +54,10 @@ declare <8 x i16> @llvm.mips.ceq.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ceq_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_ceq_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_ceq_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ceq.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ceq_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_ceq_w_RES
   ret void
 }
 
@@ -76,10 +76,10 @@ declare <4 x i32> @llvm.mips.ceq.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ceq_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_ceq_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_ceq_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ceq.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ceq_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_ceq_d_RES
   ret void
 }
 
@@ -98,10 +98,10 @@ declare <2 x i64> @llvm.mips.ceq.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_cle_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_cle_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_cle_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.cle.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_cle_s_b_RES
   ret void
 }
 
@@ -120,10 +120,10 @@ declare <16 x i8> @llvm.mips.cle.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_cle_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_cle_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_cle_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.cle.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_cle_s_h_RES
   ret void
 }
 
@@ -142,10 +142,10 @@ declare <8 x i16> @llvm.mips.cle.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_cle_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_cle_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_cle_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.cle.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_cle_s_w_RES
   ret void
 }
 
@@ -164,10 +164,10 @@ declare <4 x i32> @llvm.mips.cle.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_cle_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_cle_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_cle_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.cle.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_cle_s_d_RES
   ret void
 }
 
@@ -186,10 +186,10 @@ declare <2 x i64> @llvm.mips.cle.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_cle_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_cle_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_cle_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.cle.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_cle_u_b_RES
   ret void
 }
 
@@ -208,10 +208,10 @@ declare <16 x i8> @llvm.mips.cle.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_cle_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_cle_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_cle_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.cle.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_cle_u_h_RES
   ret void
 }
 
@@ -230,10 +230,10 @@ declare <8 x i16> @llvm.mips.cle.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_cle_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_cle_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_cle_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.cle.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_cle_u_w_RES
   ret void
 }
 
@@ -252,10 +252,10 @@ declare <4 x i32> @llvm.mips.cle.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_cle_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_cle_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_cle_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.cle.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_cle_u_d_RES
   ret void
 }
 
@@ -274,10 +274,10 @@ declare <2 x i64> @llvm.mips.cle.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_clt_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_clt_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_clt_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.clt.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_clt_s_b_RES
   ret void
 }
 
@@ -296,10 +296,10 @@ declare <16 x i8> @llvm.mips.clt.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_clt_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_clt_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_clt_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.clt.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_clt_s_h_RES
   ret void
 }
 
@@ -318,10 +318,10 @@ declare <8 x i16> @llvm.mips.clt.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_clt_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_clt_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_clt_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.clt.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_clt_s_w_RES
   ret void
 }
 
@@ -340,10 +340,10 @@ declare <4 x i32> @llvm.mips.clt.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_clt_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_clt_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_clt_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.clt.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_clt_s_d_RES
   ret void
 }
 
@@ -362,10 +362,10 @@ declare <2 x i64> @llvm.mips.clt.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_clt_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_clt_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_clt_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.clt.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_clt_u_b_RES
   ret void
 }
 
@@ -384,10 +384,10 @@ declare <16 x i8> @llvm.mips.clt.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_clt_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_clt_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_clt_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.clt.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_clt_u_h_RES
   ret void
 }
 
@@ -406,10 +406,10 @@ declare <8 x i16> @llvm.mips.clt.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_clt_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_clt_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_clt_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.clt.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_clt_u_w_RES
   ret void
 }
 
@@ -428,10 +428,10 @@ declare <4 x i32> @llvm.mips.clt.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_clt_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_clt_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_clt_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.clt.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_clt_u_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r-d.ll b/llvm/test/CodeGen/Mips/msa/3r-d.ll
index 71d042ee7f05e..b40d2661ee6aa 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-d.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-d.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_div_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_div_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_div_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_div_s_b_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_div_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_div_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_div_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_div_s_h_RES
   ret void
 }
 
@@ -54,10 +54,10 @@ declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_div_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_div_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_div_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_div_s_w_RES
   ret void
 }
 
@@ -76,10 +76,10 @@ declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_div_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_div_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_div_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_div_s_d_RES
   ret void
 }
 
@@ -95,10 +95,10 @@ declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @div_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_div_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_div_s_b_ARG2
   %2 = sdiv <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_div_s_b_RES
   ret void
 }
 
@@ -111,10 +111,10 @@ entry:
 
 define void @div_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_div_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_div_s_h_ARG2
   %2 = sdiv <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_div_s_h_RES
   ret void
 }
 
@@ -127,10 +127,10 @@ entry:
 
 define void @div_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_div_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_div_s_w_ARG2
   %2 = sdiv <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_div_s_w_RES
   ret void
 }
 
@@ -143,10 +143,10 @@ entry:
 
 define void @div_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_div_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_div_s_d_ARG2
   %2 = sdiv <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_div_s_d_RES
   ret void
 }
 
@@ -163,10 +163,10 @@ entry:
 
 define void @llvm_mips_div_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_div_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_div_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_div_u_b_RES
   ret void
 }
 
@@ -185,10 +185,10 @@ declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_div_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_div_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_div_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_div_u_h_RES
   ret void
 }
 
@@ -207,10 +207,10 @@ declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_div_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_div_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_div_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_div_u_w_RES
   ret void
 }
 
@@ -229,10 +229,10 @@ declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_div_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_div_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_div_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_div_u_d_RES
   ret void
 }
 
@@ -248,10 +248,10 @@ declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @div_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_div_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_div_u_b_ARG2
   %2 = udiv <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_div_u_b_RES
   ret void
 }
 
@@ -264,10 +264,10 @@ entry:
 
 define void @div_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_div_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_div_u_h_ARG2
   %2 = udiv <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_div_u_h_RES
   ret void
 }
 
@@ -280,10 +280,10 @@ entry:
 
 define void @div_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_div_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_div_u_w_ARG2
   %2 = udiv <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_div_u_w_RES
   ret void
 }
 
@@ -296,10 +296,10 @@ entry:
 
 define void @div_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_div_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_div_u_d_ARG2
   %2 = udiv <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_div_u_d_RES
   ret void
 }
 
@@ -326,10 +326,10 @@ entry:
 
 define void @llvm_mips_dotp_s_h_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_dotp_s_h_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_dotp_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.dotp.s.h(<16 x i8> %0, <16 x i8> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_dotp_s_h_RES
   ret void
 }
 
@@ -353,10 +353,10 @@ declare <8 x i16> @llvm.mips.dotp.s.h(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dotp_s_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_dotp_s_w_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_dotp_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.dotp.s.w(<8 x i16> %0, <8 x i16> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_dotp_s_w_RES
   ret void
 }
 
@@ -377,10 +377,10 @@ declare <4 x i32> @llvm.mips.dotp.s.w(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dotp_s_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_dotp_s_d_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_dotp_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.dotp.s.d(<4 x i32> %0, <4 x i32> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_dotp_s_d_RES
   ret void
 }
 
@@ -409,10 +409,10 @@ declare <2 x i64> @llvm.mips.dotp.s.d(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_dotp_u_h_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_dotp_u_h_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_dotp_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.dotp.u.h(<16 x i8> %0, <16 x i8> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_dotp_u_h_RES
   ret void
 }
 
@@ -436,10 +436,10 @@ declare <8 x i16> @llvm.mips.dotp.u.h(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dotp_u_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_dotp_u_w_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_dotp_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.dotp.u.w(<8 x i16> %0, <8 x i16> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_dotp_u_w_RES
   ret void
 }
 
@@ -460,10 +460,10 @@ declare <4 x i32> @llvm.mips.dotp.u.w(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dotp_u_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_dotp_u_d_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_dotp_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.dotp.u.d(<4 x i32> %0, <4 x i32> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_dotp_u_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r-i.ll b/llvm/test/CodeGen/Mips/msa/3r-i.ll
index 73ca5bc953874..c06d79a975bf1 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-i.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-i.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_ilvev_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_ilvev_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_ilvev_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ilvev.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvev_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_ilvev_b_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <16 x i8> @llvm.mips.ilvev.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ilvev_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_ilvev_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_ilvev_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ilvev.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvev_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_ilvev_h_RES
   ret void
 }
 
@@ -54,10 +54,10 @@ declare <8 x i16> @llvm.mips.ilvev.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ilvev_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_ilvev_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_ilvev_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ilvev.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvev_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_ilvev_w_RES
   ret void
 }
 
@@ -76,10 +76,10 @@ declare <4 x i32> @llvm.mips.ilvev.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ilvev_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_ilvev_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_ilvev_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ilvev.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvev_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_ilvev_d_RES
   ret void
 }
 
@@ -98,10 +98,10 @@ declare <2 x i64> @llvm.mips.ilvev.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_ilvl_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_ilvl_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_ilvl_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ilvl.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvl_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_ilvl_b_RES
   ret void
 }
 
@@ -120,10 +120,10 @@ declare <16 x i8> @llvm.mips.ilvl.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ilvl_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_ilvl_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_ilvl_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ilvl.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvl_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_ilvl_h_RES
   ret void
 }
 
@@ -142,10 +142,10 @@ declare <8 x i16> @llvm.mips.ilvl.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ilvl_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_ilvl_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_ilvl_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ilvl.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvl_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_ilvl_w_RES
   ret void
 }
 
@@ -164,10 +164,10 @@ declare <4 x i32> @llvm.mips.ilvl.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ilvl_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_ilvl_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_ilvl_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ilvl.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvl_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_ilvl_d_RES
   ret void
 }
 
@@ -186,10 +186,10 @@ declare <2 x i64> @llvm.mips.ilvl.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_ilvod_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_ilvod_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_ilvod_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ilvod.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvod_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_ilvod_b_RES
   ret void
 }
 
@@ -208,10 +208,10 @@ declare <16 x i8> @llvm.mips.ilvod.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ilvod_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_ilvod_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_ilvod_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ilvod.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvod_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_ilvod_h_RES
   ret void
 }
 
@@ -230,10 +230,10 @@ declare <8 x i16> @llvm.mips.ilvod.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ilvod_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_ilvod_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_ilvod_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ilvod.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvod_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_ilvod_w_RES
   ret void
 }
 
@@ -252,10 +252,10 @@ declare <4 x i32> @llvm.mips.ilvod.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ilvod_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_ilvod_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_ilvod_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ilvod.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvod_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_ilvod_d_RES
   ret void
 }
 
@@ -274,10 +274,10 @@ declare <2 x i64> @llvm.mips.ilvod.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_ilvr_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_ilvr_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_ilvr_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ilvr.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvr_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_ilvr_b_RES
   ret void
 }
 
@@ -296,10 +296,10 @@ declare <16 x i8> @llvm.mips.ilvr.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ilvr_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_ilvr_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_ilvr_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ilvr.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvr_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_ilvr_h_RES
   ret void
 }
 
@@ -318,10 +318,10 @@ declare <8 x i16> @llvm.mips.ilvr.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ilvr_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_ilvr_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_ilvr_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ilvr.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvr_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_ilvr_w_RES
   ret void
 }
 
@@ -340,10 +340,10 @@ declare <4 x i32> @llvm.mips.ilvr.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ilvr_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_ilvr_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_ilvr_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ilvr.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvr_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_ilvr_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r-m.ll b/llvm/test/CodeGen/Mips/msa/3r-m.ll
index 7fc7410feabf9..855ceb3dd8890 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-m.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-m.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_max_a_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_max_a_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_max_a_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.max.a.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_max_a_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_max_a_b_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <16 x i8> @llvm.mips.max.a.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_max_a_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_max_a_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_max_a_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.max.a.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_max_a_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_max_a_h_RES
   ret void
 }
 
@@ -54,10 +54,10 @@ declare <8 x i16> @llvm.mips.max.a.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_max_a_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_max_a_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_max_a_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.max.a.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_max_a_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_max_a_w_RES
   ret void
 }
 
@@ -76,10 +76,10 @@ declare <4 x i32> @llvm.mips.max.a.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_max_a_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_max_a_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_max_a_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.max.a.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_max_a_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_max_a_d_RES
   ret void
 }
 
@@ -98,10 +98,10 @@ declare <2 x i64> @llvm.mips.max.a.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_max_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_max_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_max_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.max.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_max_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_max_s_b_RES
   ret void
 }
 
@@ -120,10 +120,10 @@ declare <16 x i8> @llvm.mips.max.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_max_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_max_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_max_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.max.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_max_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_max_s_h_RES
   ret void
 }
 
@@ -142,10 +142,10 @@ declare <8 x i16> @llvm.mips.max.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_max_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_max_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_max_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.max.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_max_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_max_s_w_RES
   ret void
 }
 
@@ -164,10 +164,10 @@ declare <4 x i32> @llvm.mips.max.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_max_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_max_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_max_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.max.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_max_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_max_s_d_RES
   ret void
 }
 
@@ -186,10 +186,10 @@ declare <2 x i64> @llvm.mips.max.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_max_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_max_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_max_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.max.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_max_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_max_u_b_RES
   ret void
 }
 
@@ -208,10 +208,10 @@ declare <16 x i8> @llvm.mips.max.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_max_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_max_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_max_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.max.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_max_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_max_u_h_RES
   ret void
 }
 
@@ -230,10 +230,10 @@ declare <8 x i16> @llvm.mips.max.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_max_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_max_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_max_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.max.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_max_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_max_u_w_RES
   ret void
 }
 
@@ -252,10 +252,10 @@ declare <4 x i32> @llvm.mips.max.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_max_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_max_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_max_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.max.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_max_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_max_u_d_RES
   ret void
 }
 
@@ -274,10 +274,10 @@ declare <2 x i64> @llvm.mips.max.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_min_a_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_min_a_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_min_a_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.min.a.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_min_a_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_min_a_b_RES
   ret void
 }
 
@@ -296,10 +296,10 @@ declare <16 x i8> @llvm.mips.min.a.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_min_a_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_min_a_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_min_a_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.min.a.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_min_a_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_min_a_h_RES
   ret void
 }
 
@@ -318,10 +318,10 @@ declare <8 x i16> @llvm.mips.min.a.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_min_a_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_min_a_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_min_a_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.min.a.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_min_a_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_min_a_w_RES
   ret void
 }
 
@@ -340,10 +340,10 @@ declare <4 x i32> @llvm.mips.min.a.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_min_a_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_min_a_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_min_a_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.min.a.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_min_a_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_min_a_d_RES
   ret void
 }
 
@@ -362,10 +362,10 @@ declare <2 x i64> @llvm.mips.min.a.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_min_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_min_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_min_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.min.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_min_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_min_s_b_RES
   ret void
 }
 
@@ -384,10 +384,10 @@ declare <16 x i8> @llvm.mips.min.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_min_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_min_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_min_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.min.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_min_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_min_s_h_RES
   ret void
 }
 
@@ -406,10 +406,10 @@ declare <8 x i16> @llvm.mips.min.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_min_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_min_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_min_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.min.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_min_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_min_s_w_RES
   ret void
 }
 
@@ -428,10 +428,10 @@ declare <4 x i32> @llvm.mips.min.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_min_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_min_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_min_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.min.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_min_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_min_s_d_RES
   ret void
 }
 
@@ -450,10 +450,10 @@ declare <2 x i64> @llvm.mips.min.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_min_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_min_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_min_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.min.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_min_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_min_u_b_RES
   ret void
 }
 
@@ -472,10 +472,10 @@ declare <16 x i8> @llvm.mips.min.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_min_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_min_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_min_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.min.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_min_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_min_u_h_RES
   ret void
 }
 
@@ -494,10 +494,10 @@ declare <8 x i16> @llvm.mips.min.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_min_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_min_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_min_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.min.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_min_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_min_u_w_RES
   ret void
 }
 
@@ -516,10 +516,10 @@ declare <4 x i32> @llvm.mips.min.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_min_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_min_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_min_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.min.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_min_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_min_u_d_RES
   ret void
 }
 
@@ -538,10 +538,10 @@ declare <2 x i64> @llvm.mips.min.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_mod_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_mod_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_mod_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_mod_s_b_RES
   ret void
 }
 
@@ -560,10 +560,10 @@ declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_mod_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_mod_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_mod_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_mod_s_h_RES
   ret void
 }
 
@@ -582,10 +582,10 @@ declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_mod_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_mod_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_mod_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_mod_s_w_RES
   ret void
 }
 
@@ -604,10 +604,10 @@ declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_mod_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_mod_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_mod_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_mod_s_d_RES
   ret void
 }
 
@@ -626,10 +626,10 @@ declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_mod_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_mod_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_mod_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_mod_u_b_RES
   ret void
 }
 
@@ -648,10 +648,10 @@ declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_mod_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_mod_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_mod_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_mod_u_h_RES
   ret void
 }
 
@@ -670,10 +670,10 @@ declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_mod_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_mod_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_mod_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_mod_u_w_RES
   ret void
 }
 
@@ -692,10 +692,10 @@ declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_mod_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_mod_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_mod_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_mod_u_d_RES
   ret void
 }
 
@@ -714,10 +714,10 @@ declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_mulv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_mulv_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_mulv_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_mulv_b_RES
   ret void
 }
 
@@ -736,10 +736,10 @@ declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_mulv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_mulv_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_mulv_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_mulv_h_RES
   ret void
 }
 
@@ -758,10 +758,10 @@ declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_mulv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_mulv_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_mulv_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_mulv_w_RES
   ret void
 }
 
@@ -780,10 +780,10 @@ declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_mulv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_mulv_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_mulv_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_mulv_d_RES
   ret void
 }
 
@@ -798,10 +798,10 @@ declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @mulv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_mulv_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_mulv_b_ARG2
   %2 = mul <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_mulv_b_RES
   ret void
 }
 
@@ -814,10 +814,10 @@ entry:
 
 define void @mulv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_mulv_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_mulv_h_ARG2
   %2 = mul <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_mulv_h_RES
   ret void
 }
 
@@ -830,10 +830,10 @@ entry:
 
 define void @mulv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_mulv_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_mulv_w_ARG2
   %2 = mul <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_mulv_w_RES
   ret void
 }
 
@@ -846,10 +846,10 @@ entry:
 
 define void @mulv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_mulv_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_mulv_d_ARG2
   %2 = mul <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_mulv_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r-p.ll b/llvm/test/CodeGen/Mips/msa/3r-p.ll
index bd28741fe86e2..063da01c29db6 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-p.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-p.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_pckev_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_pckev_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_pckev_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.pckev.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_pckev_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_pckev_b_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <16 x i8> @llvm.mips.pckev.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_pckev_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_pckev_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_pckev_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.pckev.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_pckev_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_pckev_h_RES
   ret void
 }
 
@@ -54,10 +54,10 @@ declare <8 x i16> @llvm.mips.pckev.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_pckev_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_pckev_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_pckev_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.pckev.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_pckev_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_pckev_w_RES
   ret void
 }
 
@@ -76,10 +76,10 @@ declare <4 x i32> @llvm.mips.pckev.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_pckev_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_pckev_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_pckev_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.pckev.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_pckev_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_pckev_d_RES
   ret void
 }
 
@@ -98,10 +98,10 @@ declare <2 x i64> @llvm.mips.pckev.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_pckod_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_pckod_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_pckod_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.pckod.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_pckod_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_pckod_b_RES
   ret void
 }
 
@@ -120,10 +120,10 @@ declare <16 x i8> @llvm.mips.pckod.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_pckod_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_pckod_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_pckod_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.pckod.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_pckod_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_pckod_h_RES
   ret void
 }
 
@@ -142,10 +142,10 @@ declare <8 x i16> @llvm.mips.pckod.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_pckod_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_pckod_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_pckod_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.pckod.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_pckod_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_pckod_w_RES
   ret void
 }
 
@@ -164,10 +164,10 @@ declare <4 x i32> @llvm.mips.pckod.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_pckod_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_pckod_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_pckod_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.pckod.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_pckod_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_pckod_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r-s.ll b/llvm/test/CodeGen/Mips/msa/3r-s.ll
index 5df1b826935b9..6c673c543bf2f 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-s.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-s.ll
@@ -11,11 +11,11 @@
 
 define void @llvm_mips_sld_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG2
-  %2 = load i32, i32* @llvm_mips_sld_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_sld_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_sld_b_ARG2
+  %2 = load i32, ptr @llvm_mips_sld_b_ARG3
   %3 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, <16 x i8> %1, i32 %2)
-  store <16 x i8> %3, <16 x i8>* @llvm_mips_sld_b_RES
+  store <16 x i8> %3, ptr @llvm_mips_sld_b_RES
   ret void
 }
 
@@ -39,11 +39,11 @@ declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, <16 x i8>, i32) nounwind
 
 define void @llvm_mips_sld_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG2
-  %2 = load i32, i32* @llvm_mips_sld_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_sld_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_sld_h_ARG2
+  %2 = load i32, ptr @llvm_mips_sld_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, <8 x i16> %1, i32 %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_sld_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_sld_h_RES
   ret void
 }
 
@@ -67,11 +67,11 @@ declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, <8 x i16>, i32) nounwind
 
 define void @llvm_mips_sld_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG2
-  %2 = load i32, i32* @llvm_mips_sld_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_sld_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_sld_w_ARG2
+  %2 = load i32, ptr @llvm_mips_sld_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, <4 x i32> %1, i32 %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_sld_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_sld_w_RES
   ret void
 }
 
@@ -95,11 +95,11 @@ declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, <4 x i32>, i32) nounwind
 
 define void @llvm_mips_sld_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG2
-  %2 = load i32, i32* @llvm_mips_sld_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_sld_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_sld_d_ARG2
+  %2 = load i32, ptr @llvm_mips_sld_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, <2 x i64> %1, i32 %2)
-  store <2 x i64> %3, <2 x i64>* @llvm_mips_sld_d_RES
+  store <2 x i64> %3, ptr @llvm_mips_sld_d_RES
   ret void
 }
 
@@ -122,10 +122,10 @@ declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, <2 x i64>, i32) nounwind
 
 define void @llvm_mips_sll_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_sll_b_RES
   ret void
 }
 
@@ -146,10 +146,10 @@ declare <16 x i8> @llvm.mips.sll.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_sll_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_sll_h_RES
   ret void
 }
 
@@ -170,10 +170,10 @@ declare <8 x i16> @llvm.mips.sll.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_sll_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_sll_w_RES
   ret void
 }
 
@@ -194,10 +194,10 @@ declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_sll_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_sll_d_RES
   ret void
 }
 
@@ -214,10 +214,10 @@ declare <2 x i64> @llvm.mips.sll.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @sll_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG2
   %2 = shl <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_sll_b_RES
   ret void
 }
 
@@ -232,10 +232,10 @@ entry:
 
 define void @sll_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG2
   %2 = shl <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_sll_h_RES
   ret void
 }
 
@@ -250,10 +250,10 @@ entry:
 
 define void @sll_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG2
   %2 = shl <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_sll_w_RES
   ret void
 }
 
@@ -268,10 +268,10 @@ entry:
 
 define void @sll_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG2
   %2 = shl <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_sll_d_RES
   ret void
 }
 
@@ -290,10 +290,10 @@ entry:
 
 define void @llvm_mips_sra_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_sra_b_RES
   ret void
 }
 
@@ -314,10 +314,10 @@ declare <16 x i8> @llvm.mips.sra.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_sra_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_sra_h_RES
   ret void
 }
 
@@ -338,10 +338,10 @@ declare <8 x i16> @llvm.mips.sra.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_sra_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_sra_w_RES
   ret void
 }
 
@@ -362,10 +362,10 @@ declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_sra_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_sra_d_RES
   ret void
 }
 
@@ -383,10 +383,10 @@ declare <2 x i64> @llvm.mips.sra.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @sra_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG2
   %2 = ashr <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_sra_b_RES
   ret void
 }
 
@@ -401,10 +401,10 @@ entry:
 
 define void @sra_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG2
   %2 = ashr <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_sra_h_RES
   ret void
 }
 
@@ -419,10 +419,10 @@ entry:
 
 define void @sra_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG2
   %2 = ashr <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_sra_w_RES
   ret void
 }
 
@@ -437,10 +437,10 @@ entry:
 
 define void @sra_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG2
   %2 = ashr <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_sra_d_RES
   ret void
 }
 
@@ -459,10 +459,10 @@ entry:
 
 define void @llvm_mips_srar_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_srar_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_srar_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.srar.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_srar_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_srar_b_RES
   ret void
 }
 
@@ -483,10 +483,10 @@ declare <16 x i8> @llvm.mips.srar.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_srar_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_srar_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_srar_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.srar.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_srar_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_srar_h_RES
   ret void
 }
 
@@ -507,10 +507,10 @@ declare <8 x i16> @llvm.mips.srar.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_srar_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_srar_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_srar_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.srar.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_srar_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_srar_w_RES
   ret void
 }
 
@@ -531,10 +531,10 @@ declare <4 x i32> @llvm.mips.srar.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_srar_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_srar_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_srar_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.srar.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_srar_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_srar_d_RES
   ret void
 }
 
@@ -555,10 +555,10 @@ declare <2 x i64> @llvm.mips.srar.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_srl_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_srl_b_RES
   ret void
 }
 
@@ -579,10 +579,10 @@ declare <16 x i8> @llvm.mips.srl.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_srl_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_srl_h_RES
   ret void
 }
 
@@ -603,10 +603,10 @@ declare <8 x i16> @llvm.mips.srl.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_srl_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_srl_w_RES
   ret void
 }
 
@@ -627,10 +627,10 @@ declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_srl_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_srl_d_RES
   ret void
 }
 
@@ -651,10 +651,10 @@ declare <2 x i64> @llvm.mips.srl.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_srlr_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_srlr_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_srlr_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.srlr.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_srlr_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_srlr_b_RES
   ret void
 }
 
@@ -675,10 +675,10 @@ declare <16 x i8> @llvm.mips.srlr.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_srlr_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_srlr_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_srlr_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.srlr.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_srlr_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_srlr_h_RES
   ret void
 }
 
@@ -699,10 +699,10 @@ declare <8 x i16> @llvm.mips.srlr.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_srlr_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_srlr_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_srlr_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.srlr.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_srlr_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_srlr_w_RES
   ret void
 }
 
@@ -723,10 +723,10 @@ declare <4 x i32> @llvm.mips.srlr.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_srlr_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_srlr_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_srlr_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.srlr.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_srlr_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_srlr_d_RES
   ret void
 }
 
@@ -744,10 +744,10 @@ declare <2 x i64> @llvm.mips.srlr.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @srl_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG2
   %2 = lshr <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_srl_b_RES
   ret void
 }
 
@@ -762,10 +762,10 @@ entry:
 
 define void @srl_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG2
   %2 = lshr <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_srl_h_RES
   ret void
 }
 
@@ -780,10 +780,10 @@ entry:
 
 define void @srl_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG2
   %2 = lshr <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_srl_w_RES
   ret void
 }
 
@@ -798,10 +798,10 @@ entry:
 
 define void @srl_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG2
   %2 = lshr <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_srl_d_RES
   ret void
 }
 
@@ -820,10 +820,10 @@ entry:
 
 define void @llvm_mips_subs_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_subs_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_subs_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subs.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_subs_s_b_RES
   ret void
 }
 
@@ -844,10 +844,10 @@ declare <16 x i8> @llvm.mips.subs.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subs_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_subs_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_subs_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subs.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_subs_s_h_RES
   ret void
 }
 
@@ -868,10 +868,10 @@ declare <8 x i16> @llvm.mips.subs.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subs_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_subs_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_subs_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subs.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_subs_s_w_RES
   ret void
 }
 
@@ -892,10 +892,10 @@ declare <4 x i32> @llvm.mips.subs.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subs_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_subs_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_subs_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subs.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_subs_s_d_RES
   ret void
 }
 
@@ -916,10 +916,10 @@ declare <2 x i64> @llvm.mips.subs.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_subs_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_subs_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_subs_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subs.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_subs_u_b_RES
   ret void
 }
 
@@ -940,10 +940,10 @@ declare <16 x i8> @llvm.mips.subs.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subs_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_subs_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_subs_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subs.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_subs_u_h_RES
   ret void
 }
 
@@ -964,10 +964,10 @@ declare <8 x i16> @llvm.mips.subs.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subs_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_subs_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_subs_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subs.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_subs_u_w_RES
   ret void
 }
 
@@ -988,10 +988,10 @@ declare <4 x i32> @llvm.mips.subs.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subs_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_subs_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_subs_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subs.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_subs_u_d_RES
   ret void
 }
 
@@ -1012,10 +1012,10 @@ declare <2 x i64> @llvm.mips.subs.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_subsus_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_subsus_u_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_subsus_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subsus.u.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_subsus_u_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_subsus_u_b_RES
   ret void
 }
 
@@ -1036,10 +1036,10 @@ declare <16 x i8> @llvm.mips.subsus.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subsus_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_subsus_u_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_subsus_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subsus.u.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_subsus_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_subsus_u_h_RES
   ret void
 }
 
@@ -1060,10 +1060,10 @@ declare <8 x i16> @llvm.mips.subsus.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subsus_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_subsus_u_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_subsus_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subsus.u.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_subsus_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_subsus_u_w_RES
   ret void
 }
 
@@ -1084,10 +1084,10 @@ declare <4 x i32> @llvm.mips.subsus.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subsus_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_subsus_u_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_subsus_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subsus.u.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_subsus_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_subsus_u_d_RES
   ret void
 }
 
@@ -1108,10 +1108,10 @@ declare <2 x i64> @llvm.mips.subsus.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_subsuu_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_subsuu_s_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_subsuu_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_subsuu_s_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_subsuu_s_b_RES
   ret void
 }
 
@@ -1132,10 +1132,10 @@ declare <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subsuu_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_subsuu_s_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_subsuu_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_subsuu_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_subsuu_s_h_RES
   ret void
 }
 
@@ -1156,10 +1156,10 @@ declare <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subsuu_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_subsuu_s_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_subsuu_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_subsuu_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_subsuu_s_w_RES
   ret void
 }
 
@@ -1180,10 +1180,10 @@ declare <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subsuu_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_subsuu_s_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_subsuu_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_subsuu_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_subsuu_s_d_RES
   ret void
 }
 
@@ -1204,10 +1204,10 @@ declare <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_subv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_subv_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_subv_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_subv_b_RES
   ret void
 }
 
@@ -1228,10 +1228,10 @@ declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_subv_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_subv_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_subv_h_RES
   ret void
 }
 
@@ -1252,10 +1252,10 @@ declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_subv_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_subv_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_subv_w_RES
   ret void
 }
 
@@ -1276,10 +1276,10 @@ declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_subv_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_subv_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_subv_d_RES
   ret void
 }
 
@@ -1297,10 +1297,10 @@ declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @subv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_subv_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_subv_b_ARG2
   %2 = sub <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_subv_b_RES
   ret void
 }
 
@@ -1315,10 +1315,10 @@ entry:
 
 define void @subv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_subv_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_subv_h_ARG2
   %2 = sub <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_subv_h_RES
   ret void
 }
 
@@ -1333,10 +1333,10 @@ entry:
 
 define void @subv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_subv_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_subv_w_ARG2
   %2 = sub <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_subv_w_RES
   ret void
 }
 
@@ -1351,10 +1351,10 @@ entry:
 
 define void @subv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_subv_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_subv_d_ARG2
   %2 = sub <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_subv_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r-v.ll b/llvm/test/CodeGen/Mips/msa/3r-v.ll
index 1bd995026f50e..80828a07907cd 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-v.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-v.ll
@@ -11,11 +11,11 @@
 
 define void @llvm_mips_vshf_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_vshf_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_vshf_b_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_vshf_b_ARG3
   %3 = tail call <16 x i8> @llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* @llvm_mips_vshf_b_RES
+  store <16 x i8> %3, ptr @llvm_mips_vshf_b_RES
   ret void
 }
 
@@ -36,11 +36,11 @@ declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_vshf_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_vshf_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_vshf_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_vshf_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_vshf_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_vshf_h_RES
   ret void
 }
 
@@ -61,11 +61,11 @@ declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_vshf_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_vshf_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_vshf_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_vshf_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_vshf_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_vshf_w_RES
   ret void
 }
 
@@ -86,11 +86,11 @@ declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_vshf_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG2
-  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_vshf_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_vshf_d_ARG2
+  %2 = load <2 x i64>, ptr @llvm_mips_vshf_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* @llvm_mips_vshf_d_RES
+  store <2 x i64> %3, ptr @llvm_mips_vshf_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r_4r.ll b/llvm/test/CodeGen/Mips/msa/3r_4r.ll
index 3dd32b350b12e..abeaee682fb48 100644
--- a/llvm/test/CodeGen/Mips/msa/3r_4r.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r_4r.ll
@@ -11,11 +11,11 @@
 
 define void @llvm_mips_maddv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_maddv_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_maddv_b_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_maddv_b_ARG3
   %3 = tail call <16 x i8> @llvm.mips.maddv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* @llvm_mips_maddv_b_RES
+  store <16 x i8> %3, ptr @llvm_mips_maddv_b_RES
   ret void
 }
 
@@ -36,11 +36,11 @@ declare <16 x i8> @llvm.mips.maddv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_maddv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_maddv_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_maddv_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_maddv_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.maddv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_maddv_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_maddv_h_RES
   ret void
 }
 
@@ -61,11 +61,11 @@ declare <8 x i16> @llvm.mips.maddv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_maddv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_maddv_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_maddv_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_maddv_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.maddv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_maddv_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_maddv_w_RES
   ret void
 }
 
@@ -86,11 +86,11 @@ declare <4 x i32> @llvm.mips.maddv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_maddv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG2
-  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_maddv_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_maddv_d_ARG2
+  %2 = load <2 x i64>, ptr @llvm_mips_maddv_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.maddv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* @llvm_mips_maddv_d_RES
+  store <2 x i64> %3, ptr @llvm_mips_maddv_d_RES
   ret void
 }
 
@@ -111,11 +111,11 @@ declare <2 x i64> @llvm.mips.maddv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_msubv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_msubv_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_msubv_b_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_msubv_b_ARG3
   %3 = tail call <16 x i8> @llvm.mips.msubv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* @llvm_mips_msubv_b_RES
+  store <16 x i8> %3, ptr @llvm_mips_msubv_b_RES
   ret void
 }
 
@@ -136,11 +136,11 @@ declare <16 x i8> @llvm.mips.msubv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_msubv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_msubv_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_msubv_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_msubv_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.msubv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_msubv_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_msubv_h_RES
   ret void
 }
 
@@ -161,11 +161,11 @@ declare <8 x i16> @llvm.mips.msubv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_msubv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_msubv_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_msubv_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_msubv_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.msubv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_msubv_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_msubv_w_RES
   ret void
 }
 
@@ -186,11 +186,11 @@ declare <4 x i32> @llvm.mips.msubv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_msubv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG2
-  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_msubv_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_msubv_d_ARG2
+  %2 = load <2 x i64>, ptr @llvm_mips_msubv_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.msubv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* @llvm_mips_msubv_d_RES
+  store <2 x i64> %3, ptr @llvm_mips_msubv_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll b/llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll
index 4dad98958ae93..4b286a0e93cbd 100644
--- a/llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll
@@ -11,10 +11,10 @@
 
 define void @llvm_mips_dpadd_s_h_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_dpadd_s_h_ARG2
+  %1 = load <16 x i8>, ptr @llvm_mips_dpadd_s_h_ARG3
   %2 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>, <16 x i8> %0, <16 x i8> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_dpadd_s_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_dpadd_s_h_RES
   ret void
 }
 
@@ -34,10 +34,10 @@ declare <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dpadd_s_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_dpadd_s_w_ARG2
+  %1 = load <8 x i16>, ptr @llvm_mips_dpadd_s_w_ARG3
   %2 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> <i32 4, i32 4, i32 4, i32 4>, <8 x i16> %0, <8 x i16> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_dpadd_s_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_dpadd_s_w_RES
   ret void
 }
 
@@ -57,10 +57,10 @@ declare <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dpadd_s_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_dpadd_s_d_ARG2
+  %1 = load <4 x i32>, ptr @llvm_mips_dpadd_s_d_ARG3
   %2 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> <i64 4, i64 4>, <4 x i32> %0, <4 x i32> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_dpadd_s_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_dpadd_s_d_RES
   ret void
 }
 
@@ -80,10 +80,10 @@ declare <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_dpadd_u_h_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_dpadd_u_h_ARG2
+  %1 = load <16 x i8>, ptr @llvm_mips_dpadd_u_h_ARG3
   %2 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>, <16 x i8> %0, <16 x i8> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_dpadd_u_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_dpadd_u_h_RES
   ret void
 }
 
@@ -103,10 +103,10 @@ declare <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dpadd_u_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_dpadd_u_w_ARG2
+  %1 = load <8 x i16>, ptr @llvm_mips_dpadd_u_w_ARG3
   %2 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> <i32 4, i32 4, i32 4, i32 4>, <8 x i16> %0, <8 x i16> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_dpadd_u_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_dpadd_u_w_RES
   ret void
 }
 
@@ -126,10 +126,10 @@ declare <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dpadd_u_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_dpadd_u_d_ARG2
+  %1 = load <4 x i32>, ptr @llvm_mips_dpadd_u_d_ARG3
   %2 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> <i64 4, i64 4>, <4 x i32> %0, <4 x i32> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_dpadd_u_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_dpadd_u_d_RES
   ret void
 }
 
@@ -150,11 +150,11 @@ declare <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_dpsub_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_dpsub_s_h_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_dpsub_s_h_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_dpsub_s_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_s_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_dpsub_s_h_RES
   ret void
 }
 
@@ -175,11 +175,11 @@ declare <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dpsub_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_dpsub_s_w_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_dpsub_s_w_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_dpsub_s_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_s_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_dpsub_s_w_RES
   ret void
 }
 
@@ -200,11 +200,11 @@ declare <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dpsub_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_dpsub_s_d_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_dpsub_s_d_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_dpsub_s_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
-  store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_s_d_RES
+  store <2 x i64> %3, ptr @llvm_mips_dpsub_s_d_RES
   ret void
 }
 
@@ -225,11 +225,11 @@ declare <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_dpsub_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_dpsub_u_h_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_dpsub_u_h_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_dpsub_u_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_u_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_dpsub_u_h_RES
   ret void
 }
 
@@ -250,11 +250,11 @@ declare <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dpsub_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_dpsub_u_w_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_dpsub_u_w_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_dpsub_u_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_u_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_dpsub_u_w_RES
   ret void
 }
 
@@ -275,11 +275,11 @@ declare <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dpsub_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_dpsub_u_d_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_dpsub_u_d_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_dpsub_u_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
-  store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_u_d_RES
+  store <2 x i64> %3, ptr @llvm_mips_dpsub_u_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3r_splat.ll b/llvm/test/CodeGen/Mips/msa/3r_splat.ll
index 850db64424513..e8d9d23fa9b49 100644
--- a/llvm/test/CodeGen/Mips/msa/3r_splat.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r_splat.ll
@@ -11,9 +11,9 @@
 
 define void @llvm_mips_splat_b_test(i32 %a) nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splat_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_splat_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.splat.b(<16 x i8> %0, i32 %a)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_splat_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_splat_b_RES
   ret void
 }
 
@@ -32,9 +32,9 @@ declare <16 x i8> @llvm.mips.splat.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_splat_h_test(i32 %a) nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splat_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_splat_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.splat.h(<8 x i16> %0, i32 %a)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_splat_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_splat_h_RES
   ret void
 }
 
@@ -53,9 +53,9 @@ declare <8 x i16> @llvm.mips.splat.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_splat_w_test(i32 %a) nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splat_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_splat_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.splat.w(<4 x i32> %0, i32 %a)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_splat_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_splat_w_RES
   ret void
 }
 
@@ -74,9 +74,9 @@ declare <4 x i32> @llvm.mips.splat.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_splat_d_test(i32 %a) nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splat_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_splat_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> %0, i32 %a)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_splat_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_splat_d_RES
   ret void
 }
 
@@ -93,7 +93,7 @@ declare <2 x i64> @llvm.mips.splat.d(<2 x i64>, i32) nounwind
 define void @llvm_mips_splat_d_arg_test(i32 %arg) {
 entry:
   %0 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> <i64 12720328, i64 10580959>, i32 %arg)
-  store volatile <2 x i64> %0, <2 x i64>* @llvm_mips_splat_d_RES
+  store volatile <2 x i64> %0, ptr @llvm_mips_splat_d_RES
   ret void
 }
 ; MIPS32-LABEL: llvm_mips_splat_d_arg_test
@@ -108,7 +108,7 @@ entry:
 define void @llvm_mips_splat_d_imm_test() {
 entry:
   %0 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> <i64 12720328, i64 10580959>, i32 76)
-  store volatile<2 x i64> %0, <2 x i64>* @llvm_mips_splat_d_RES
+  store volatile<2 x i64> %0, ptr @llvm_mips_splat_d_RES
   ret void
 }
 ; MIPS32-LABEL: llvm_mips_splat_d_imm_test

diff --git a/llvm/test/CodeGen/Mips/msa/3rf.ll b/llvm/test/CodeGen/Mips/msa/3rf.ll
index dcc7696142a3b..9bae9ba530453 100644
--- a/llvm/test/CodeGen/Mips/msa/3rf.ll
+++ b/llvm/test/CodeGen/Mips/msa/3rf.ll
@@ -9,10 +9,10 @@
 
 define void @llvm_mips_fadd_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fadd_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fadd_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fadd_w_RES
   ret void
 }
 
@@ -31,10 +31,10 @@ declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fadd_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fadd_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fadd_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fadd_d_RES
   ret void
 }
 
@@ -49,10 +49,10 @@ declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>) nounwind
 
 define void @fadd_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fadd_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fadd_w_ARG2
   %2 = fadd <4 x float> %0, %1
-  store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fadd_w_RES
   ret void
 }
 
@@ -65,10 +65,10 @@ entry:
 
 define void @fadd_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fadd_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fadd_d_ARG2
   %2 = fadd <2 x double> %0, %1
-  store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fadd_d_RES
   ret void
 }
 
@@ -85,10 +85,10 @@ entry:
 
 define void @llvm_mips_fdiv_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fdiv_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fdiv_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fdiv.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fdiv_w_RES
   ret void
 }
 
@@ -107,10 +107,10 @@ declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fdiv_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fdiv_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fdiv_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fdiv.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fdiv_d_RES
   ret void
 }
 
@@ -125,10 +125,10 @@ declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>) nounwind
 
 define void @fdiv_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fdiv_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fdiv_w_ARG2
   %2 = fdiv <4 x float> %0, %1
-  store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fdiv_w_RES
   ret void
 }
 
@@ -141,10 +141,10 @@ entry:
 
 define void @fdiv_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fdiv_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fdiv_d_ARG2
   %2 = fdiv <2 x double> %0, %1
-  store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fdiv_d_RES
   ret void
 }
 
@@ -161,10 +161,10 @@ entry:
 
 define void @llvm_mips_fmin_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fmin_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fmin_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fmin_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fmin_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fmin_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fmin_w_RES
   ret void
 }
 
@@ -183,10 +183,10 @@ declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fmin_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fmin_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fmin_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fmin_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fmin_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* @llvm_mips_fmin_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fmin_d_RES
   ret void
 }
 
@@ -205,10 +205,10 @@ declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fmin_a_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fmin_a_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fmin_a_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fmin_a_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fmin_a_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fmin.a.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fmin_a_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fmin_a_w_RES
   ret void
 }
 
@@ -227,10 +227,10 @@ declare <4 x float> @llvm.mips.fmin.a.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fmin_a_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fmin_a_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fmin_a_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fmin_a_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fmin_a_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fmin.a.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* @llvm_mips_fmin_a_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fmin_a_d_RES
   ret void
 }
 
@@ -249,10 +249,10 @@ declare <2 x double> @llvm.mips.fmin.a.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fmax_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fmax_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fmax_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fmax_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fmax_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fmax_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fmax_w_RES
   ret void
 }
 
@@ -271,10 +271,10 @@ declare <4 x float> @llvm.mips.fmax.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fmax_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fmax_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fmax_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fmax_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fmax_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* @llvm_mips_fmax_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fmax_d_RES
   ret void
 }
 
@@ -293,10 +293,10 @@ declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fmax_a_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fmax_a_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fmax_a_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fmax_a_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fmax_a_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fmax.a.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fmax_a_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fmax_a_w_RES
   ret void
 }
 
@@ -315,10 +315,10 @@ declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fmax_a_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fmax_a_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fmax_a_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fmax_a_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fmax_a_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fmax.a.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* @llvm_mips_fmax_a_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fmax_a_d_RES
   ret void
 }
 
@@ -337,10 +337,10 @@ declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fmul_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fmul_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fmul_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fmul_w_RES
   ret void
 }
 
@@ -359,10 +359,10 @@ declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fmul_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fmul_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fmul_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fmul.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fmul_d_RES
   ret void
 }
 
@@ -377,10 +377,10 @@ declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>) nounwind
 
 define void @fmul_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fmul_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fmul_w_ARG2
   %2 = fmul <4 x float> %0, %1
-  store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fmul_w_RES
   ret void
 }
 
@@ -393,10 +393,10 @@ entry:
 
 define void @fmul_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fmul_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fmul_d_ARG2
   %2 = fmul <2 x double> %0, %1
-  store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fmul_d_RES
   ret void
 }
 
@@ -413,10 +413,10 @@ entry:
 
 define void @llvm_mips_fsub_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsub_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsub_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fsub.w(<4 x float> %0, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fsub_w_RES
   ret void
 }
 
@@ -435,10 +435,10 @@ declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsub_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsub_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsub_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fsub.d(<2 x double> %0, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fsub_d_RES
   ret void
 }
 
@@ -454,10 +454,10 @@ declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>) nounwind
 
 define void @fsub_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsub_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsub_w_ARG2
   %2 = fsub <4 x float> %0, %1
-  store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fsub_w_RES
   ret void
 }
 
@@ -470,10 +470,10 @@ entry:
 
 define void @fsub_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsub_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsub_d_ARG2
   %2 = fsub <2 x double> %0, %1
-  store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fsub_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3rf_4rf.ll b/llvm/test/CodeGen/Mips/msa/3rf_4rf.ll
index 633282314ecfd..6142ada9fef91 100644
--- a/llvm/test/CodeGen/Mips/msa/3rf_4rf.ll
+++ b/llvm/test/CodeGen/Mips/msa/3rf_4rf.ll
@@ -11,11 +11,11 @@
 
 define void @llvm_mips_fmadd_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG2
-  %2 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG3
+  %0 = load <4 x float>, ptr @llvm_mips_fmadd_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fmadd_w_ARG2
+  %2 = load <4 x float>, ptr @llvm_mips_fmadd_w_ARG3
   %3 = tail call <4 x float> @llvm.mips.fmadd.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
-  store <4 x float> %3, <4 x float>* @llvm_mips_fmadd_w_RES
+  store <4 x float> %3, ptr @llvm_mips_fmadd_w_RES
   ret void
 }
 
@@ -36,11 +36,11 @@ declare <4 x float> @llvm.mips.fmadd.w(<4 x float>, <4 x float>, <4 x float>) no
 
 define void @llvm_mips_fmadd_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG2
-  %2 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG3
+  %0 = load <2 x double>, ptr @llvm_mips_fmadd_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fmadd_d_ARG2
+  %2 = load <2 x double>, ptr @llvm_mips_fmadd_d_ARG3
   %3 = tail call <2 x double> @llvm.mips.fmadd.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
-  store <2 x double> %3, <2 x double>* @llvm_mips_fmadd_d_RES
+  store <2 x double> %3, ptr @llvm_mips_fmadd_d_RES
   ret void
 }
 
@@ -61,11 +61,11 @@ declare <2 x double> @llvm.mips.fmadd.d(<2 x double>, <2 x double>, <2 x double>
 
 define void @llvm_mips_fmsub_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG2
-  %2 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG3
+  %0 = load <4 x float>, ptr @llvm_mips_fmsub_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fmsub_w_ARG2
+  %2 = load <4 x float>, ptr @llvm_mips_fmsub_w_ARG3
   %3 = tail call <4 x float> @llvm.mips.fmsub.w(<4 x float> %0, <4 x float> %1, <4 x float> %2)
-  store <4 x float> %3, <4 x float>* @llvm_mips_fmsub_w_RES
+  store <4 x float> %3, ptr @llvm_mips_fmsub_w_RES
   ret void
 }
 
@@ -86,11 +86,11 @@ declare <4 x float> @llvm.mips.fmsub.w(<4 x float>, <4 x float>, <4 x float>) no
 
 define void @llvm_mips_fmsub_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG2
-  %2 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG3
+  %0 = load <2 x double>, ptr @llvm_mips_fmsub_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fmsub_d_ARG2
+  %2 = load <2 x double>, ptr @llvm_mips_fmsub_d_ARG3
   %3 = tail call <2 x double> @llvm.mips.fmsub.d(<2 x double> %0, <2 x double> %1, <2 x double> %2)
-  store <2 x double> %3, <2 x double>* @llvm_mips_fmsub_d_RES
+  store <2 x double> %3, ptr @llvm_mips_fmsub_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll b/llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll
index d253c945a6862..f397644df3919 100644
--- a/llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll
+++ b/llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll
@@ -11,11 +11,11 @@
 
 define void @llvm_mips_madd_q_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_madd_q_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_madd_q_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_madd_q_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_madd_q_h_RES
   ret void
 }
 
@@ -36,11 +36,11 @@ declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_madd_q_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_madd_q_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_madd_q_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_madd_q_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.madd.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_madd_q_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_madd_q_w_RES
   ret void
 }
 
@@ -61,11 +61,11 @@ declare <4 x i32> @llvm.mips.madd.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_maddr_q_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_maddr_q_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_maddr_q_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_maddr_q_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.maddr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_maddr_q_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_maddr_q_h_RES
   ret void
 }
 
@@ -86,11 +86,11 @@ declare <8 x i16> @llvm.mips.maddr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_maddr_q_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_maddr_q_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_maddr_q_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_maddr_q_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.maddr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_maddr_q_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_maddr_q_w_RES
   ret void
 }
 
@@ -111,11 +111,11 @@ declare <4 x i32> @llvm.mips.maddr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_msub_q_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_msub_q_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_msub_q_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_msub_q_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.msub.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_msub_q_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_msub_q_h_RES
   ret void
 }
 
@@ -136,11 +136,11 @@ declare <8 x i16> @llvm.mips.msub.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_msub_q_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_msub_q_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_msub_q_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_msub_q_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.msub.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_msub_q_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_msub_q_w_RES
   ret void
 }
 
@@ -161,11 +161,11 @@ declare <4 x i32> @llvm.mips.msub.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_msubr_q_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_msubr_q_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_msubr_q_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_msubr_q_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.msubr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* @llvm_mips_msubr_q_h_RES
+  store <8 x i16> %3, ptr @llvm_mips_msubr_q_h_RES
   ret void
 }
 
@@ -186,11 +186,11 @@ declare <8 x i16> @llvm.mips.msubr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_msubr_q_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_msubr_q_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_msubr_q_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_msubr_q_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.msubr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* @llvm_mips_msubr_q_w_RES
+  store <4 x i32> %3, ptr @llvm_mips_msubr_q_w_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3rf_exdo.ll b/llvm/test/CodeGen/Mips/msa/3rf_exdo.ll
index e6acfe986cbee..70da349d0f13e 100644
--- a/llvm/test/CodeGen/Mips/msa/3rf_exdo.ll
+++ b/llvm/test/CodeGen/Mips/msa/3rf_exdo.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_fexdo_h_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fexdo_h_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fexdo_h_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fexdo_h_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fexdo_h_ARG2
   %2 = tail call <8 x half> @llvm.mips.fexdo.h(<4 x float> %0, <4 x float> %1)
-  store <8 x half> %2, <8 x half>* @llvm_mips_fexdo_h_RES
+  store <8 x half> %2, ptr @llvm_mips_fexdo_h_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <8 x half> @llvm.mips.fexdo.h(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fexdo_w_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fexdo_w_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fexdo_w_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fexdo_w_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fexdo_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fexdo.w(<2 x double> %0, <2 x double> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fexdo_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fexdo_w_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3rf_float_int.ll b/llvm/test/CodeGen/Mips/msa/3rf_float_int.ll
index 71146e23ff6c8..4c1328b49be56 100644
--- a/llvm/test/CodeGen/Mips/msa/3rf_float_int.ll
+++ b/llvm/test/CodeGen/Mips/msa/3rf_float_int.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_fexp2_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fexp2_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_fexp2_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fexp2_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_fexp2_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fexp2.w(<4 x float> %0, <4 x i32> %1)
-  store <4 x float> %2, <4 x float>* @llvm_mips_fexp2_w_RES
+  store <4 x float> %2, ptr @llvm_mips_fexp2_w_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <4 x float> @llvm.mips.fexp2.w(<4 x float>, <4 x i32>) nounwind
 
 define void @llvm_mips_fexp2_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fexp2_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_fexp2_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fexp2_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_fexp2_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fexp2.d(<2 x double> %0, <2 x i64> %1)
-  store <2 x double> %2, <2 x double>* @llvm_mips_fexp2_d_RES
+  store <2 x double> %2, ptr @llvm_mips_fexp2_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3rf_int_float.ll b/llvm/test/CodeGen/Mips/msa/3rf_int_float.ll
index 1f1b4ae7e3c16..7e186beb32050 100644
--- a/llvm/test/CodeGen/Mips/msa/3rf_int_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/3rf_int_float.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_fcaf_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fcaf_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fcaf_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fcaf_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fcaf_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fcaf.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fcaf_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fcaf_w_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <4 x i32> @llvm.mips.fcaf.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fcaf_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fcaf_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fcaf_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fcaf_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fcaf_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fcaf.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fcaf_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fcaf_d_RES
   ret void
 }
 
@@ -54,10 +54,10 @@ declare <2 x i64> @llvm.mips.fcaf.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fceq_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fceq_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fceq_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fceq_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fceq_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fceq.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fceq_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fceq_w_RES
   ret void
 }
 
@@ -76,10 +76,10 @@ declare <4 x i32> @llvm.mips.fceq.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fceq_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fceq_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fceq_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fceq_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fceq_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fceq.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fceq_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fceq_d_RES
   ret void
 }
 
@@ -98,10 +98,10 @@ declare <2 x i64> @llvm.mips.fceq.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fcle_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fcle_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fcle_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fcle_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fcle_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fcle.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fcle_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fcle_w_RES
   ret void
 }
 
@@ -120,10 +120,10 @@ declare <4 x i32> @llvm.mips.fcle.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fcle_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fcle_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fcle_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fcle_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fcle_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fcle.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fcle_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fcle_d_RES
   ret void
 }
 
@@ -142,10 +142,10 @@ declare <2 x i64> @llvm.mips.fcle.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fclt_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fclt_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fclt_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fclt_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fclt_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fclt.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fclt_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fclt_w_RES
   ret void
 }
 
@@ -164,10 +164,10 @@ declare <4 x i32> @llvm.mips.fclt.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fclt_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fclt_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fclt_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fclt_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fclt_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fclt.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fclt_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fclt_d_RES
   ret void
 }
 
@@ -186,10 +186,10 @@ declare <2 x i64> @llvm.mips.fclt.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fcor_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fcor_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fcor_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fcor_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fcor_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fcor.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fcor_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fcor_w_RES
   ret void
 }
 
@@ -208,10 +208,10 @@ declare <4 x i32> @llvm.mips.fcor.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fcor_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fcor_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fcor_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fcor_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fcor_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fcor.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fcor_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fcor_d_RES
   ret void
 }
 
@@ -230,10 +230,10 @@ declare <2 x i64> @llvm.mips.fcor.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fcne_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fcne_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fcne_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fcne_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fcne_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fcne.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fcne_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fcne_w_RES
   ret void
 }
 
@@ -252,10 +252,10 @@ declare <4 x i32> @llvm.mips.fcne.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fcne_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fcne_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fcne_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fcne_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fcne_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fcne.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fcne_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fcne_d_RES
   ret void
 }
 
@@ -274,10 +274,10 @@ declare <2 x i64> @llvm.mips.fcne.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fcueq_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fcueq_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fcueq_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fcueq_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fcueq_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fcueq.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fcueq_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fcueq_w_RES
   ret void
 }
 
@@ -296,10 +296,10 @@ declare <4 x i32> @llvm.mips.fcueq.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fcueq_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fcueq_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fcueq_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fcueq_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fcueq_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fcueq.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fcueq_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fcueq_d_RES
   ret void
 }
 
@@ -318,10 +318,10 @@ declare <2 x i64> @llvm.mips.fcueq.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fcult_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fcult_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fcult_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fcult_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fcult_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fcult.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fcult_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fcult_w_RES
   ret void
 }
 
@@ -340,10 +340,10 @@ declare <4 x i32> @llvm.mips.fcult.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fcult_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fcult_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fcult_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fcult_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fcult_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fcult.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fcult_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fcult_d_RES
   ret void
 }
 
@@ -362,10 +362,10 @@ declare <2 x i64> @llvm.mips.fcult.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fcule_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fcule_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fcule_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fcule_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fcule_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fcule.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fcule_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fcule_w_RES
   ret void
 }
 
@@ -384,10 +384,10 @@ declare <4 x i32> @llvm.mips.fcule.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fcule_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fcule_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fcule_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fcule_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fcule_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fcule.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fcule_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fcule_d_RES
   ret void
 }
 
@@ -406,10 +406,10 @@ declare <2 x i64> @llvm.mips.fcule.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fcun_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fcun_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fcun_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fcun_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fcun_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fcun.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fcun_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fcun_w_RES
   ret void
 }
 
@@ -428,10 +428,10 @@ declare <4 x i32> @llvm.mips.fcun.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fcun_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fcun_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fcun_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fcun_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fcun_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fcun.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fcun_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fcun_d_RES
   ret void
 }
 
@@ -450,10 +450,10 @@ declare <2 x i64> @llvm.mips.fcun.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fcune_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fcune_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fcune_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fcune_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fcune_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fcune.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fcune_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fcune_w_RES
   ret void
 }
 
@@ -472,10 +472,10 @@ declare <4 x i32> @llvm.mips.fcune.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fcune_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fcune_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fcune_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fcune_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fcune_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fcune.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fcune_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fcune_d_RES
   ret void
 }
 
@@ -494,10 +494,10 @@ declare <2 x i64> @llvm.mips.fcune.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fsaf_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsaf_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsaf_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsaf_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsaf_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsaf.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fsaf_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fsaf_w_RES
   ret void
 }
 
@@ -516,10 +516,10 @@ declare <4 x i32> @llvm.mips.fsaf.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsaf_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsaf_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsaf_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsaf_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsaf_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsaf.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fsaf_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fsaf_d_RES
   ret void
 }
 
@@ -538,10 +538,10 @@ declare <2 x i64> @llvm.mips.fsaf.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fseq_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fseq_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fseq_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fseq_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fseq_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fseq.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fseq_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fseq_w_RES
   ret void
 }
 
@@ -560,10 +560,10 @@ declare <4 x i32> @llvm.mips.fseq.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fseq_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fseq_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fseq_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fseq_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fseq_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fseq.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fseq_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fseq_d_RES
   ret void
 }
 
@@ -582,10 +582,10 @@ declare <2 x i64> @llvm.mips.fseq.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fsle_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsle_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsle_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsle_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsle_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsle.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fsle_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fsle_w_RES
   ret void
 }
 
@@ -604,10 +604,10 @@ declare <4 x i32> @llvm.mips.fsle.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsle_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsle_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsle_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsle_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsle_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsle.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fsle_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fsle_d_RES
   ret void
 }
 
@@ -626,10 +626,10 @@ declare <2 x i64> @llvm.mips.fsle.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fslt_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fslt_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fslt_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fslt_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fslt_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fslt.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fslt_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fslt_w_RES
   ret void
 }
 
@@ -648,10 +648,10 @@ declare <4 x i32> @llvm.mips.fslt.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fslt_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fslt_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fslt_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fslt_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fslt_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fslt.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fslt_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fslt_d_RES
   ret void
 }
 
@@ -670,10 +670,10 @@ declare <2 x i64> @llvm.mips.fslt.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fsor_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsor_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsor_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsor_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsor_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsor.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fsor_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fsor_w_RES
   ret void
 }
 
@@ -692,10 +692,10 @@ declare <4 x i32> @llvm.mips.fsor.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsor_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsor_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsor_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsor_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsor_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsor.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fsor_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fsor_d_RES
   ret void
 }
 
@@ -714,10 +714,10 @@ declare <2 x i64> @llvm.mips.fsor.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fsne_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsne_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsne_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsne_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsne_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsne.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fsne_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fsne_w_RES
   ret void
 }
 
@@ -736,10 +736,10 @@ declare <4 x i32> @llvm.mips.fsne.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsne_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsne_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsne_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsne_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsne_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsne.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fsne_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fsne_d_RES
   ret void
 }
 
@@ -758,10 +758,10 @@ declare <2 x i64> @llvm.mips.fsne.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fsueq_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsueq_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsueq_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsueq_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsueq_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsueq.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fsueq_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fsueq_w_RES
   ret void
 }
 
@@ -780,10 +780,10 @@ declare <4 x i32> @llvm.mips.fsueq.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsueq_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsueq_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsueq_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsueq_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsueq_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsueq.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fsueq_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fsueq_d_RES
   ret void
 }
 
@@ -802,10 +802,10 @@ declare <2 x i64> @llvm.mips.fsueq.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fsult_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsult_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsult_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsult_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsult_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsult.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fsult_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fsult_w_RES
   ret void
 }
 
@@ -824,10 +824,10 @@ declare <4 x i32> @llvm.mips.fsult.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsult_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsult_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsult_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsult.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fsult_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fsult_d_RES
   ret void
 }
 
@@ -846,10 +846,10 @@ declare <2 x i64> @llvm.mips.fsult.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fsule_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsule_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsule_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsule.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fsule_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fsule_w_RES
   ret void
 }
 
@@ -868,10 +868,10 @@ declare <4 x i32> @llvm.mips.fsule.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsule_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsule_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsule_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsule.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fsule_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fsule_d_RES
   ret void
 }
 
@@ -890,10 +890,10 @@ declare <2 x i64> @llvm.mips.fsule.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fsun_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsun_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsun_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsun.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fsun_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fsun_w_RES
   ret void
 }
 
@@ -912,10 +912,10 @@ declare <4 x i32> @llvm.mips.fsun.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsun_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsun_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsun_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsun.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fsun_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fsun_d_RES
   ret void
 }
 
@@ -934,10 +934,10 @@ declare <2 x i64> @llvm.mips.fsun.d(<2 x double>, <2 x double>) nounwind
 
 define void @llvm_mips_fsune_w_test() nounwind {
 entry:
-  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG1
-  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG2
+  %0 = load <4 x float>, ptr @llvm_mips_fsune_w_ARG1
+  %1 = load <4 x float>, ptr @llvm_mips_fsune_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsune.w(<4 x float> %0, <4 x float> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_fsune_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_fsune_w_RES
   ret void
 }
 
@@ -956,10 +956,10 @@ declare <4 x i32> @llvm.mips.fsune.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fsune_d_test() nounwind {
 entry:
-  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG1
-  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG2
+  %0 = load <2 x double>, ptr @llvm_mips_fsune_d_ARG1
+  %1 = load <2 x double>, ptr @llvm_mips_fsune_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsune.d(<2 x double> %0, <2 x double> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_fsune_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_fsune_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/3rf_q.ll b/llvm/test/CodeGen/Mips/msa/3rf_q.ll
index 932350a718d14..5e3358ccd063a 100644
--- a/llvm/test/CodeGen/Mips/msa/3rf_q.ll
+++ b/llvm/test/CodeGen/Mips/msa/3rf_q.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_mul_q_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_mul_q_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_mul_q_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mul.q.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_mul_q_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_mul_q_h_RES
   ret void
 }
 
@@ -32,10 +32,10 @@ declare <8 x i16> @llvm.mips.mul.q.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_mul_q_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_mul_q_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_mul_q_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mul.q.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_mul_q_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_mul_q_w_RES
   ret void
 }
 
@@ -54,10 +54,10 @@ declare <4 x i32> @llvm.mips.mul.q.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_mulr_q_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_mulr_q_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_mulr_q_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mulr.q.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_mulr_q_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_mulr_q_h_RES
   ret void
 }
 
@@ -76,10 +76,10 @@ declare <8 x i16> @llvm.mips.mulr.q.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_mulr_q_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_mulr_q_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_mulr_q_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mulr.q.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_mulr_q_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_mulr_q_w_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/arithmetic.ll b/llvm/test/CodeGen/Mips/msa/arithmetic.ll
index 0caafb6cc0982..62fd35a69abaa 100644
--- a/llvm/test/CodeGen/Mips/msa/arithmetic.ll
+++ b/llvm/test/CodeGen/Mips/msa/arithmetic.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=ALL,MIPS
 ; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=ALL,MIPSEL
 
-define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @add_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: add_v16i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($6)
@@ -10,14 +10,14 @@ define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; ALL-NEXT:    addv.b $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = add <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @add_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: add_v8i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($6)
@@ -25,14 +25,14 @@ define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; ALL-NEXT:    addv.h $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = add <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @add_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: add_v4i32:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($6)
@@ -40,14 +40,14 @@ define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; ALL-NEXT:    addv.w $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = add <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @add_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: add_v2i64:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($6)
@@ -55,68 +55,68 @@ define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; ALL-NEXT:    addv.d $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = add <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @add_v16i8_i(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: add_v16i8_i:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($5)
 ; ALL-NEXT:    addvi.b $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = add <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
               i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @add_v8i16_i(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: add_v8i16_i:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($5)
 ; ALL-NEXT:    addvi.h $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = add <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
               i16 1, i16 1, i16 1, i16 1>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @add_v4i32_i(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: add_v4i32_i:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($5)
 ; ALL-NEXT:    addvi.w $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @add_v2i64_i(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: add_v2i64_i:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($5)
 ; ALL-NEXT:    addvi.d $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = add <2 x i64> %1, <i64 1, i64 1>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @sub_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: sub_v16i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($6)
@@ -124,14 +124,14 @@ define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; ALL-NEXT:    subv.b $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = sub <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @sub_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: sub_v8i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($6)
@@ -139,14 +139,14 @@ define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; ALL-NEXT:    subv.h $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = sub <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @sub_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: sub_v4i32:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($6)
@@ -154,14 +154,14 @@ define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; ALL-NEXT:    subv.w $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = sub <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @sub_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: sub_v2i64:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($6)
@@ -169,109 +169,109 @@ define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; ALL-NEXT:    subv.d $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = sub <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @sub_v16i8_i(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: sub_v16i8_i:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($5)
 ; ALL-NEXT:    subvi.b $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = sub <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
               i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @sub_v16i8_i_negated(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @sub_v16i8_i_negated(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: sub_v16i8_i_negated:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($5)
 ; ALL-NEXT:    subvi.b $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = add <16 x i8> %1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
               i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @sub_v8i16_i(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: sub_v8i16_i:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($5)
 ; ALL-NEXT:    subvi.h $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = sub <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1,
               i16 1, i16 1, i16 1, i16 1>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @sub_v8i16_i_negated(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @sub_v8i16_i_negated(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: sub_v8i16_i_negated:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($5)
 ; ALL-NEXT:    subvi.h $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = add <8 x i16> %1, <i16 -1, i16 -1, i16 -1, i16 -1,
               i16 -1, i16 -1, i16 -1, i16 -1>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @sub_v4i32_i(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: sub_v4i32_i:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($5)
 ; ALL-NEXT:    subvi.w $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = sub <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @sub_v4i32_i_negated(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @sub_v4i32_i_negated(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: sub_v4i32_i_negated:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($5)
 ; ALL-NEXT:    subvi.w $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = add <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @sub_v2i64_i(ptr %c, ptr %a) nounwind {
 ; ALL-LABEL: sub_v2i64_i:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($5)
 ; ALL-NEXT:    subvi.d $w0, $w0, 1
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = sub <2 x i64> %1, <i64 1, i64 1>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @sub_v2i64_i_negated(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @sub_v2i64_i_negated(ptr %c, ptr %a) nounwind {
 ; MIPS-LABEL: sub_v2i64_i_negated:
 ; MIPS:       # %bb.0:
 ; MIPS-NEXT:    ldi.b $w0, -1
@@ -288,13 +288,13 @@ define void @sub_v2i64_i_negated(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; MIPSEL-NEXT:    addv.d $w0, $w1, $w0
 ; MIPSEL-NEXT:    jr $ra
 ; MIPSEL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = add <2 x i64> %1, <i64 -1, i64 -1>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @mul_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mul_v16i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($6)
@@ -302,14 +302,14 @@ define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; ALL-NEXT:    mulv.b $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = mul <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @mul_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mul_v8i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($6)
@@ -317,14 +317,14 @@ define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; ALL-NEXT:    mulv.h $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = mul <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @mul_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mul_v4i32:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($6)
@@ -332,14 +332,14 @@ define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; ALL-NEXT:    mulv.w $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = mul <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @mul_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @mul_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mul_v2i64:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($6)
@@ -347,14 +347,14 @@ define void @mul_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; ALL-NEXT:    mulv.d $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = mul <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @maddv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+define void @maddv_v16i8(ptr %d, ptr %a, ptr %b,
 ; ALL-LABEL: maddv_v16i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($7)
@@ -363,17 +363,17 @@ define void @maddv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
 ; ALL-NEXT:    maddv.b $w2, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w2, 0($4)
-             <16 x i8>* %c) nounwind {
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
-  %3 = load <16 x i8>, <16 x i8>* %c
+             ptr %c) nounwind {
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
+  %3 = load <16 x i8>, ptr %c
   %4 = mul <16 x i8> %2, %3
   %5 = add <16 x i8> %4, %1
-  store <16 x i8> %5, <16 x i8>* %d
+  store <16 x i8> %5, ptr %d
   ret void
 }
 
-define void @maddv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+define void @maddv_v8i16(ptr %d, ptr %a, ptr %b,
 ; ALL-LABEL: maddv_v8i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($7)
@@ -382,17 +382,17 @@ define void @maddv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
 ; ALL-NEXT:    maddv.h $w2, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w2, 0($4)
-             <8 x i16>* %c) nounwind {
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
-  %3 = load <8 x i16>, <8 x i16>* %c
+             ptr %c) nounwind {
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
+  %3 = load <8 x i16>, ptr %c
   %4 = mul <8 x i16> %2, %3
   %5 = add <8 x i16> %4, %1
-  store <8 x i16> %5, <8 x i16>* %d
+  store <8 x i16> %5, ptr %d
   ret void
 }
 
-define void @maddv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+define void @maddv_v4i32(ptr %d, ptr %a, ptr %b,
 ; ALL-LABEL: maddv_v4i32:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($7)
@@ -401,17 +401,17 @@ define void @maddv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
 ; ALL-NEXT:    maddv.w $w2, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w2, 0($4)
-             <4 x i32>* %c) nounwind {
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
-  %3 = load <4 x i32>, <4 x i32>* %c
+             ptr %c) nounwind {
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
+  %3 = load <4 x i32>, ptr %c
   %4 = mul <4 x i32> %2, %3
   %5 = add <4 x i32> %4, %1
-  store <4 x i32> %5, <4 x i32>* %d
+  store <4 x i32> %5, ptr %d
   ret void
 }
 
-define void @maddv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+define void @maddv_v2i64(ptr %d, ptr %a, ptr %b,
 ; ALL-LABEL: maddv_v2i64:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($7)
@@ -420,17 +420,17 @@ define void @maddv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
 ; ALL-NEXT:    maddv.d $w2, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w2, 0($4)
-             <2 x i64>* %c) nounwind {
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
-  %3 = load <2 x i64>, <2 x i64>* %c
+             ptr %c) nounwind {
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
+  %3 = load <2 x i64>, ptr %c
   %4 = mul <2 x i64> %2, %3
   %5 = add <2 x i64> %4, %1
-  store <2 x i64> %5, <2 x i64>* %d
+  store <2 x i64> %5, ptr %d
   ret void
 }
 
-define void @msubv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
+define void @msubv_v16i8(ptr %d, ptr %a, ptr %b,
 ; ALL-LABEL: msubv_v16i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($7)
@@ -439,17 +439,17 @@ define void @msubv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
 ; ALL-NEXT:    msubv.b $w2, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w2, 0($4)
-             <16 x i8>* %c) nounwind {
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
-  %3 = load <16 x i8>, <16 x i8>* %c
+             ptr %c) nounwind {
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
+  %3 = load <16 x i8>, ptr %c
   %4 = mul <16 x i8> %2, %3
   %5 = sub <16 x i8> %1, %4
-  store <16 x i8> %5, <16 x i8>* %d
+  store <16 x i8> %5, ptr %d
   ret void
 }
 
-define void @msubv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
+define void @msubv_v8i16(ptr %d, ptr %a, ptr %b,
 ; ALL-LABEL: msubv_v8i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($7)
@@ -458,17 +458,17 @@ define void @msubv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
 ; ALL-NEXT:    msubv.h $w2, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w2, 0($4)
-             <8 x i16>* %c) nounwind {
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
-  %3 = load <8 x i16>, <8 x i16>* %c
+             ptr %c) nounwind {
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
+  %3 = load <8 x i16>, ptr %c
   %4 = mul <8 x i16> %2, %3
   %5 = sub <8 x i16> %1, %4
-  store <8 x i16> %5, <8 x i16>* %d
+  store <8 x i16> %5, ptr %d
   ret void
 }
 
-define void @msubv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
+define void @msubv_v4i32(ptr %d, ptr %a, ptr %b,
 ; ALL-LABEL: msubv_v4i32:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($7)
@@ -477,17 +477,17 @@ define void @msubv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
 ; ALL-NEXT:    msubv.w $w2, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w2, 0($4)
-             <4 x i32>* %c) nounwind {
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
-  %3 = load <4 x i32>, <4 x i32>* %c
+             ptr %c) nounwind {
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
+  %3 = load <4 x i32>, ptr %c
   %4 = mul <4 x i32> %2, %3
   %5 = sub <4 x i32> %1, %4
-  store <4 x i32> %5, <4 x i32>* %d
+  store <4 x i32> %5, ptr %d
   ret void
 }
 
-define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
+define void @msubv_v2i64(ptr %d, ptr %a, ptr %b,
 ; ALL-LABEL: msubv_v2i64:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($7)
@@ -496,17 +496,17 @@ define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
 ; ALL-NEXT:    msubv.d $w2, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w2, 0($4)
-             <2 x i64>* %c) nounwind {
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
-  %3 = load <2 x i64>, <2 x i64>* %c
+             ptr %c) nounwind {
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
+  %3 = load <2 x i64>, ptr %c
   %4 = mul <2 x i64> %2, %3
   %5 = sub <2 x i64> %1, %4
-  store <2 x i64> %5, <2 x i64>* %d
+  store <2 x i64> %5, ptr %d
   ret void
 }
 
-define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @div_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: div_s_v16i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($6)
@@ -514,14 +514,14 @@ define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; ALL-NEXT:    div_s.b $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = sdiv <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @div_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: div_s_v8i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($6)
@@ -529,14 +529,14 @@ define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; ALL-NEXT:    div_s.h $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = sdiv <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @div_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: div_s_v4i32:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($6)
@@ -544,14 +544,14 @@ define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; ALL-NEXT:    div_s.w $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = sdiv <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @div_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: div_s_v2i64:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($6)
@@ -559,14 +559,14 @@ define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; ALL-NEXT:    div_s.d $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = sdiv <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @div_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: div_u_v16i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($6)
@@ -574,14 +574,14 @@ define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; ALL-NEXT:    div_u.b $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = udiv <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @div_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: div_u_v8i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($6)
@@ -589,14 +589,14 @@ define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; ALL-NEXT:    div_u.h $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = udiv <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @div_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: div_u_v4i32:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($6)
@@ -604,14 +604,14 @@ define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; ALL-NEXT:    div_u.w $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = udiv <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @div_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: div_u_v2i64:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($6)
@@ -619,14 +619,14 @@ define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; ALL-NEXT:    div_u.d $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = udiv <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @mod_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mod_s_v16i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($6)
@@ -634,14 +634,14 @@ define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; ALL-NEXT:    mod_s.b $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = srem <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @mod_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mod_s_v8i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($6)
@@ -649,14 +649,14 @@ define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; ALL-NEXT:    mod_s.h $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = srem <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @mod_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mod_s_v4i32:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($6)
@@ -664,14 +664,14 @@ define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; ALL-NEXT:    mod_s.w $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = srem <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @mod_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mod_s_v2i64:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($6)
@@ -679,14 +679,14 @@ define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; ALL-NEXT:    mod_s.d $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = srem <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @mod_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mod_u_v16i8:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.b $w0, 0($6)
@@ -694,14 +694,14 @@ define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; ALL-NEXT:    mod_u.b $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = urem <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @mod_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mod_u_v8i16:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.h $w0, 0($6)
@@ -709,14 +709,14 @@ define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; ALL-NEXT:    mod_u.h $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = urem <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @mod_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mod_u_v4i32:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.w $w0, 0($6)
@@ -724,14 +724,14 @@ define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; ALL-NEXT:    mod_u.w $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = urem <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @mod_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @mod_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; ALL-LABEL: mod_u_v2i64:
 ; ALL:       # %bb.0:
 ; ALL-NEXT:    ld.d $w0, 0($6)
@@ -739,9 +739,9 @@ define void @mod_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; ALL-NEXT:    mod_u.d $w0, $w1, $w0
 ; ALL-NEXT:    jr $ra
 ; ALL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = urem <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/msa/arithmetic_float.ll b/llvm/test/CodeGen/Mips/msa/arithmetic_float.ll
index d3081d7586e9d..b2ce43171aeb1 100644
--- a/llvm/test/CodeGen/Mips/msa/arithmetic_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/arithmetic_float.ll
@@ -1,444 +1,444 @@
 ; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 ; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 
-define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @add_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: add_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fadd <4 x float> %1, %2
   ; CHECK-DAG: fadd.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x float> %3, <4 x float>* %c
+  store <4 x float> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size add_v4f32
 }
 
-define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @add_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: add_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fadd <2 x double> %1, %2
   ; CHECK-DAG: fadd.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x double> %3, <2 x double>* %c
+  store <2 x double> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size add_v2f64
 }
 
-define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @sub_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: sub_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fsub <4 x float> %1, %2
   ; CHECK-DAG: fsub.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x float> %3, <4 x float>* %c
+  store <4 x float> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size sub_v4f32
 }
 
-define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @sub_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: sub_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fsub <2 x double> %1, %2
   ; CHECK-DAG: fsub.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x double> %3, <2 x double>* %c
+  store <2 x double> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size sub_v2f64
 }
 
-define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @mul_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: mul_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fmul <4 x float> %1, %2
   ; CHECK-DAG: fmul.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x float> %3, <4 x float>* %c
+  store <4 x float> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mul_v4f32
 }
 
-define void @mul_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @mul_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: mul_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fmul <2 x double> %1, %2
   ; CHECK-DAG: fmul.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x double> %3, <2 x double>* %c
+  store <2 x double> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mul_v2f64
 }
 
-define void @fma_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
-                       <4 x float>* %c) nounwind {
+define void @fma_v4f32(ptr %d, ptr %a, ptr %b,
+                       ptr %c) nounwind {
   ; CHECK: fma_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <4 x float>, <4 x float>* %c
+  %3 = load <4 x float>, ptr %c
   ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
   %4 = tail call <4 x float> @llvm.fma.v4f32 (<4 x float> %1, <4 x float> %2,
                                               <4 x float> %3)
   ; CHECK-DAG: fmadd.w [[R1]], [[R2]], [[R3]]
-  store <4 x float> %4, <4 x float>* %d
+  store <4 x float> %4, ptr %d
   ; CHECK-DAG: st.w [[R1]], 0($4)
 
   ret void
   ; CHECK: .size fma_v4f32
 }
 
-define void @fma_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
-                       <2 x double>* %c) nounwind {
+define void @fma_v2f64(ptr %d, ptr %a, ptr %b,
+                       ptr %c) nounwind {
   ; CHECK: fma_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <2 x double>, <2 x double>* %c
+  %3 = load <2 x double>, ptr %c
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
   %4 = tail call <2 x double> @llvm.fma.v2f64 (<2 x double> %1, <2 x double> %2,
                                                <2 x double> %3)
   ; CHECK-DAG: fmadd.d [[R1]], [[R2]], [[R3]]
-  store <2 x double> %4, <2 x double>* %d
+  store <2 x double> %4, ptr %d
   ; CHECK-DAG: st.d [[R1]], 0($4)
 
   ret void
   ; CHECK: .size fma_v2f64
 }
 
-define void @fmlu_fsub_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
-                       <4 x float>* %c) nounwind {
+define void @fmlu_fsub_v4f32(ptr %d, ptr %a, ptr %b,
+                       ptr %c) nounwind {
   ; CHECK: fmlu_fsub_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %b
+  %1 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($6)
-  %2 = load <4 x float>, <4 x float>* %c
+  %2 = load <4 x float>, ptr %c
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($7)
   %3 = fmul <4 x float> %1, %2
   ; CHECK-DAG: fmul.w [[R2]], [[R1]], [[R2]]
-  %4 = load <4 x float>, <4 x float>* %a
+  %4 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($5)
   %5 = fsub <4 x float> %4, %3
   ; CHECK-DAG: fsub.w [[R2]], [[R3]], [[R2]]
-  store <4 x float> %5, <4 x float>* %d
+  store <4 x float> %5, ptr %d
   ; CHECK-DAG: st.w [[R2]], 0($4)
 
   ret void
   ; CHECK: .size fmlu_fsub_v4f32
 }
 
-define void @fmul_fsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
-                       <2 x double>* %c) nounwind {
+define void @fmul_fsub_v2f64(ptr %d, ptr %a, ptr %b,
+                       ptr %c) nounwind {
   ; CHECK: fmul_fsub_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %b
+  %1 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($7)
-  %2 = load <2 x double>, <2 x double>* %c
+  %2 = load <2 x double>, ptr %c
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fmul <2 x double> %1, %2
   ; CHECK-DAG: fmul.d [[R1]], [[R2]], [[R1]]
-  %4 = load <2 x double>, <2 x double>* %a
+  %4 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($5)
   %5 = fsub <2 x double> %4, %3
   ; CHECK-DAG: fsub.d [[R1]], [[R3]], [[R1]]
-  store <2 x double> %5, <2 x double>* %d
+  store <2 x double> %5, ptr %d
   ; CHECK-DAG: st.d [[R1]], 0($4)
 
   ret void
   ; CHECK: .size fmul_fsub_v2f64
 }
 
-define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @fdiv_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: fdiv_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fdiv <4 x float> %1, %2
   ; CHECK-DAG: fdiv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x float> %3, <4 x float>* %c
+  store <4 x float> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size fdiv_v4f32
 }
 
-define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @fdiv_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: fdiv_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fdiv <2 x double> %1, %2
   ; CHECK-DAG: fdiv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x double> %3, <2 x double>* %c
+  store <2 x double> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size fdiv_v2f64
 }
 
-define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+define void @fabs_v4f32(ptr %c, ptr %a) nounwind {
   ; CHECK: fabs_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <4 x float> @llvm.fabs.v4f32 (<4 x float> %1)
   ; CHECK-DAG: fmax_a.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <4 x float> %2, <4 x float>* %c
+  store <4 x float> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size fabs_v4f32
 }
 
-define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+define void @fabs_v2f64(ptr %c, ptr %a) nounwind {
   ; CHECK: fabs_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <2 x double> @llvm.fabs.v2f64 (<2 x double> %1)
   ; CHECK-DAG: fmax_a.d [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <2 x double> %2, <2 x double>* %c
+  store <2 x double> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size fabs_v2f64
 }
 
-define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+define void @fexp2_v4f32(ptr %c, ptr %a) nounwind {
   ; CHECK: fexp2_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
   ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
   ; CHECK-DAG: ffint_u.w [[R4:\$w[0-9]+]], [[R3]]
   ; CHECK-DAG: fexp2.w [[R4:\$w[0-9]+]], [[R3]], [[R1]]
-  store <4 x float> %2, <4 x float>* %c
+  store <4 x float> %2, ptr %c
   ; CHECK-DAG: st.w [[R4]], 0($4)
 
   ret void
   ; CHECK: .size fexp2_v4f32
 }
 
-define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+define void @fexp2_v2f64(ptr %c, ptr %a) nounwind {
   ; CHECK: fexp2_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
   ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
   ; CHECK-DAG: ffint_u.d [[R4:\$w[0-9]+]], [[R3]]
   ; CHECK-DAG: fexp2.d [[R4:\$w[0-9]+]], [[R3]], [[R1]]
-  store <2 x double> %2, <2 x double>* %c
+  store <2 x double> %2, ptr %c
   ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
   ; CHECK: .size fexp2_v2f64
 }
 
-define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
+define void @fexp2_v4f32_2(ptr %c, ptr %a) nounwind {
   ; CHECK: fexp2_v4f32_2:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
   %3 = fmul <4 x float> <float 2.0, float 2.0, float 2.0, float 2.0>, %2
   ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
   ; CHECK-DAG: ffint_u.w [[R4:\$w[0-9]+]], [[R3]]
   ; CHECK-DAG: fexp2.w [[R5:\$w[0-9]+]], [[R4]], [[R1]]
-  store <4 x float> %3, <4 x float>* %c
+  store <4 x float> %3, ptr %c
   ; CHECK-DAG: st.w [[R5]], 0($4)
 
   ret void
   ; CHECK: .size fexp2_v4f32_2
 }
 
-define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
+define void @fexp2_v2f64_2(ptr %c, ptr %a) nounwind {
   ; CHECK: fexp2_v2f64_2:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
   %3 = fmul <2 x double> <double 2.0, double 2.0>, %2
   ; CHECK-DAG: ldi.d [[R2:\$w[0-9]+]], 1
   ; CHECK-DAG: ffint_u.d [[R3:\$w[0-9]+]], [[R2]]
   ; CHECK-DAG: fexp2.d [[R4:\$w[0-9]+]], [[R3]], [[R1]]
-  store <2 x double> %3, <2 x double>* %c
+  store <2 x double> %3, ptr %c
   ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
   ; CHECK: .size fexp2_v2f64_2
 }
 
-define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
+define void @fsqrt_v4f32(ptr %c, ptr %a) nounwind {
   ; CHECK: fsqrt_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %1)
   ; CHECK-DAG: fsqrt.w [[R3:\$w[0-9]+]], [[R1]]
-  store <4 x float> %2, <4 x float>* %c
+  store <4 x float> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size fsqrt_v4f32
 }
 
-define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
+define void @fsqrt_v2f64(ptr %c, ptr %a) nounwind {
   ; CHECK: fsqrt_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %1)
   ; CHECK-DAG: fsqrt.d [[R3:\$w[0-9]+]], [[R1]]
-  store <2 x double> %2, <2 x double>* %c
+  store <2 x double> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size fsqrt_v2f64
 }
 
-define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
+define void @ffint_u_v4f32(ptr %c, ptr %a) nounwind {
   ; CHECK: ffint_u_v4f32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = uitofp <4 x i32> %1 to <4 x float>
   ; CHECK-DAG: ffint_u.w [[R3:\$w[0-9]+]], [[R1]]
-  store <4 x float> %2, <4 x float>* %c
+  store <4 x float> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ffint_u_v4f32
 }
 
-define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
+define void @ffint_u_v2f64(ptr %c, ptr %a) nounwind {
   ; CHECK: ffint_u_v2f64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = uitofp <2 x i64> %1 to <2 x double>
   ; CHECK-DAG: ffint_u.d [[R3:\$w[0-9]+]], [[R1]]
-  store <2 x double> %2, <2 x double>* %c
+  store <2 x double> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ffint_u_v2f64
 }
 
-define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
+define void @ffint_s_v4f32(ptr %c, ptr %a) nounwind {
   ; CHECK: ffint_s_v4f32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = sitofp <4 x i32> %1 to <4 x float>
   ; CHECK-DAG: ffint_s.w [[R3:\$w[0-9]+]], [[R1]]
-  store <4 x float> %2, <4 x float>* %c
+  store <4 x float> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ffint_s_v4f32
 }
 
-define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
+define void @ffint_s_v2f64(ptr %c, ptr %a) nounwind {
   ; CHECK: ffint_s_v2f64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = sitofp <2 x i64> %1 to <2 x double>
   ; CHECK-DAG: ffint_s.d [[R3:\$w[0-9]+]], [[R1]]
-  store <2 x double> %2, <2 x double>* %c
+  store <2 x double> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ffint_s_v2f64
 }
 
-define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
+define void @ftrunc_u_v4f32(ptr %c, ptr %a) nounwind {
   ; CHECK: ftrunc_u_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = fptoui <4 x float> %1 to <4 x i32>
   ; CHECK-DAG: ftrunc_u.w [[R3:\$w[0-9]+]], [[R1]]
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ftrunc_u_v4f32
 }
 
-define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
+define void @ftrunc_u_v2f64(ptr %c, ptr %a) nounwind {
   ; CHECK: ftrunc_u_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = fptoui <2 x double> %1 to <2 x i64>
   ; CHECK-DAG: ftrunc_u.d [[R3:\$w[0-9]+]], [[R1]]
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ftrunc_u_v2f64
 }
 
-define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
+define void @ftrunc_s_v4f32(ptr %c, ptr %a) nounwind {
   ; CHECK: ftrunc_s_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = fptosi <4 x float> %1 to <4 x i32>
   ; CHECK-DAG: ftrunc_s.w [[R3:\$w[0-9]+]], [[R1]]
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ftrunc_s_v4f32
 }
 
-define void @ftrunc_s_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
+define void @ftrunc_s_v2f64(ptr %c, ptr %a) nounwind {
   ; CHECK: ftrunc_s_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = fptosi <2 x double> %1 to <2 x i64>
   ; CHECK-DAG: ftrunc_s.d [[R3:\$w[0-9]+]], [[R1]]
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void

diff  --git a/llvm/test/CodeGen/Mips/msa/avoid_vector_shift_combines.ll b/llvm/test/CodeGen/Mips/msa/avoid_vector_shift_combines.ll
index 04633cb575ce5..ca22eb941215f 100644
--- a/llvm/test/CodeGen/Mips/msa/avoid_vector_shift_combines.ll
+++ b/llvm/test/CodeGen/Mips/msa/avoid_vector_shift_combines.ll
@@ -10,7 +10,7 @@ declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32)
 
 ; do not fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2), MASK) if C1 < C2
 ; MASK_TYPE1 = C2-C1 0s | 1s | ends with C1 0s
-define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64(ptr %a, ptr %b) {
 ; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64:
 ; MIPSEL64R6:       # %bb.0: # %entry
 ; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
@@ -27,15 +27,15 @@ define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64(<2 x i64>*
 ; MIPSEL32R5-NEXT:    jr $ra
 ; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a
+  %0 = load <2 x i64>, ptr %a
   %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 52)
   %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 51)
-  store <2 x i64> %2, <2 x i64>* %b
+  store <2 x i64> %2, ptr %b
   ret void
 }
 
 ; do not fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2), MASK) if C1 < C2
-define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long(ptr %a, ptr %b) {
 ; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long:
 ; MIPSEL64R6:       # %bb.0: # %entry
 ; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
@@ -52,16 +52,16 @@ define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i64_long(<2 x
 ; MIPSEL32R5-NEXT:    jr $ra
 ; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a
+  %0 = load <2 x i64>, ptr %a
   %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 6)
   %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 4)
-  store <2 x i64> %2, <2 x i64>* %b
+  store <2 x i64> %2, ptr %b
   ret void
 }
 
 ; do not fold (shl (srl x, c1), c2) -> (and (shl x, (sub c1, c2), MASK) if C1 >= C2
 ; MASK_TYPE2 = 1s | C1 zeros
-define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32(ptr %a, ptr %b) {
 ; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32:
 ; MIPSEL64R6:       # %bb.0: # %entry
 ; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
@@ -78,15 +78,15 @@ define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type2_i32(<2 x i64>*
 ; MIPSEL32R5-NEXT:    jr $ra
 ; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a
+  %0 = load <2 x i64>, ptr %a
   %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 4)
   %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 6)
-  store <2 x i64> %2, <2 x i64>* %b
+  store <2 x i64> %2, ptr %b
   ret void
 }
 
 ; do not fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2), MASK) if C1 < C2
-define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long(<4 x i32>* %a, <4 x i32>* %b) {
+define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long(ptr %a, ptr %b) {
 ; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long:
 ; MIPSEL64R6:       # %bb.0: # %entry
 ; MIPSEL64R6-NEXT:    ld.w $w0, 0($4)
@@ -103,15 +103,15 @@ define void @avoid_to_combine_shifts_to_shift_plus_and_mask_type1_i32_long(<4 x
 ; MIPSEL32R5-NEXT:    jr $ra
 ; MIPSEL32R5-NEXT:    st.w $w0, 0($5)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a
+  %0 = load <4 x i32>, ptr %a
   %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
   %2 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %1, i32 3)
-  store <4 x i32> %2, <4 x i32>* %b
+  store <4 x i32> %2, ptr %b
   ret void
 }
 
 ; do not fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
-define void @avoid_to_combine_shifts_to_and_mask_type2_i64_long(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_and_mask_type2_i64_long(ptr %a, ptr %b) {
 ; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type2_i64_long:
 ; MIPSEL64R6:       # %bb.0: # %entry
 ; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
@@ -128,15 +128,15 @@ define void @avoid_to_combine_shifts_to_and_mask_type2_i64_long(<2 x i64>* %a, <
 ; MIPSEL32R5-NEXT:    jr $ra
 ; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a
+  %0 = load <2 x i64>, ptr %a
   %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 38)
   %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 38)
-  store <2 x i64> %2, <2 x i64>* %b
+  store <2 x i64> %2, ptr %b
   ret void
 }
 
 ; do not fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
-define void @avoid_to_combine_shifts_to_and_mask_type2_i64(<2 x i64>* %a, <2 x i64>* %b) {
+define void @avoid_to_combine_shifts_to_and_mask_type2_i64(ptr %a, ptr %b) {
 ; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type2_i64:
 ; MIPSEL64R6:       # %bb.0: # %entry
 ; MIPSEL64R6-NEXT:    ld.d $w0, 0($4)
@@ -153,15 +153,15 @@ define void @avoid_to_combine_shifts_to_and_mask_type2_i64(<2 x i64>* %a, <2 x i
 ; MIPSEL32R5-NEXT:    jr $ra
 ; MIPSEL32R5-NEXT:    st.d $w0, 0($5)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %a
+  %0 = load <2 x i64>, ptr %a
   %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 3)
   %2 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %1, i32 3)
-  store <2 x i64> %2, <2 x i64>* %b
+  store <2 x i64> %2, ptr %b
   ret void
 }
 
 ; do not fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
-define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_a(<4 x i32>* %a, <4 x i32>* %b) {
+define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_a(ptr %a, ptr %b) {
 ; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type1_long_i32_a:
 ; MIPSEL64R6:       # %bb.0: # %entry
 ; MIPSEL64R6-NEXT:    ld.w $w0, 0($4)
@@ -178,15 +178,15 @@ define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_a(<4 x i32>* %a,
 ; MIPSEL32R5-NEXT:    jr $ra
 ; MIPSEL32R5-NEXT:    st.w $w0, 0($5)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a
+  %0 = load <4 x i32>, ptr %a
   %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 5)
   %2 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %1, i32 5)
-  store <4 x i32> %2, <4 x i32>* %b
+  store <4 x i32> %2, ptr %b
   ret void
 }
 
 ; do not fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
-define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_b(<4 x i32>* %a, <4 x i32>* %b) {
+define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_b(ptr %a, ptr %b) {
 ; MIPSEL64R6-LABEL: avoid_to_combine_shifts_to_and_mask_type1_long_i32_b:
 ; MIPSEL64R6:       # %bb.0: # %entry
 ; MIPSEL64R6-NEXT:    ld.w $w0, 0($4)
@@ -203,9 +203,9 @@ define void @avoid_to_combine_shifts_to_and_mask_type1_long_i32_b(<4 x i32>* %a,
 ; MIPSEL32R5-NEXT:    jr $ra
 ; MIPSEL32R5-NEXT:    st.w $w0, 0($5)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %a
+  %0 = load <4 x i32>, ptr %a
   %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 30)
   %2 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %1, i32 30)
-  store <4 x i32> %2, <4 x i32>* %b
+  store <4 x i32> %2, ptr %b
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/basic_operations.ll b/llvm/test/CodeGen/Mips/msa/basic_operations.ll
index e55f821392c25..397fa7d69eb6a 100644
--- a/llvm/test/CodeGen/Mips/msa/basic_operations.ll
+++ b/llvm/test/CodeGen/Mips/msa/basic_operations.ll
@@ -213,13 +213,13 @@ define void @const_v16i8() nounwind {
 ; N64-LE-NEXT:    fill.d $w0, $1
 ; N64-LE-NEXT:    jr $ra
 ; N64-LE-NEXT:    st.b $w0, 0($2)
-  store volatile <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8>*@v16i8
-  store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8>*@v16i8
-  store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, <16 x i8>*@v16i8
-  store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, <16 x i8>*@v16i8
-  store volatile <16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, <16 x i8>*@v16i8
-  store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4>, <16 x i8>*@v16i8
-  store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, <16 x i8>*@v16i8
+  store volatile <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, ptr @v16i8
+  store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, ptr @v16i8
+  store volatile <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 31>, ptr @v16i8
+  store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6>, ptr @v16i8
+  store volatile <16 x i8> <i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0, i8 1, i8 0>, ptr @v16i8
+  store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4, i8 1, i8 2, i8 3, i8 4>, ptr @v16i8
+  store volatile <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>, ptr @v16i8
   ret void
 }
 
@@ -386,12 +386,12 @@ define void @const_v8i16() nounwind {
 ; N64-LE-NEXT:    fill.d $w0, $1
 ; N64-LE-NEXT:    jr $ra
 ; N64-LE-NEXT:    st.h $w0, 0($2)
-  store volatile <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16>*@v8i16
-  store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16>*@v8i16
-  store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 2, i16 1, i16 1, i16 1, i16 31>, <8 x i16>*@v8i16
-  store volatile <8 x i16> <i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028>, <8 x i16>*@v8i16
-  store volatile <8 x i16> <i16 1, i16 2, i16 1, i16 2, i16 1, i16 2, i16 1, i16 2>, <8 x i16>*@v8i16
-  store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, <8 x i16>*@v8i16
+  store volatile <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, ptr @v8i16
+  store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, ptr @v8i16
+  store volatile <8 x i16> <i16 1, i16 1, i16 1, i16 2, i16 1, i16 1, i16 1, i16 31>, ptr @v8i16
+  store volatile <8 x i16> <i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028, i16 1028>, ptr @v8i16
+  store volatile <8 x i16> <i16 1, i16 2, i16 1, i16 2, i16 1, i16 2, i16 1, i16 2>, ptr @v8i16
+  store volatile <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>, ptr @v8i16
   ret void
 }
 
@@ -571,13 +571,13 @@ define void @const_v4i32() nounwind {
 ; N64-LE-NEXT:    ld.w $w0, 0($1)
 ; N64-LE-NEXT:    jr $ra
 ; N64-LE-NEXT:    st.w $w0, 0($2)
-  store volatile <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32>*@v4i32
-  store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32>*@v4i32
-  store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 31>, <4 x i32>*@v4i32
-  store volatile <4 x i32> <i32 16843009, i32 16843009, i32 16843009, i32 16843009>, <4 x i32>*@v4i32
-  store volatile <4 x i32> <i32 65537, i32 65537, i32 65537, i32 65537>, <4 x i32>*@v4i32
-  store volatile <4 x i32> <i32 1, i32 2, i32 1, i32 2>, <4 x i32>*@v4i32
-  store volatile <4 x i32> <i32 3, i32 4, i32 5, i32 6>, <4 x i32>*@v4i32
+  store volatile <4 x i32> <i32 0, i32 0, i32 0, i32 0>, ptr @v4i32
+  store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 1>, ptr @v4i32
+  store volatile <4 x i32> <i32 1, i32 1, i32 1, i32 31>, ptr @v4i32
+  store volatile <4 x i32> <i32 16843009, i32 16843009, i32 16843009, i32 16843009>, ptr @v4i32
+  store volatile <4 x i32> <i32 65537, i32 65537, i32 65537, i32 65537>, ptr @v4i32
+  store volatile <4 x i32> <i32 1, i32 2, i32 1, i32 2>, ptr @v4i32
+  store volatile <4 x i32> <i32 3, i32 4, i32 5, i32 6>, ptr @v4i32
   ret void
 }
 
@@ -659,13 +659,13 @@ define void @const_v2i64() nounwind {
 ; N64-NEXT:    ld.d $w0, 0($1)
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    st.d $w0, 0($2)
-  store volatile <2 x i64> <i64 0, i64 0>, <2 x i64>*@v2i64
-  store volatile <2 x i64> <i64 72340172838076673, i64 72340172838076673>, <2 x i64>*@v2i64
-  store volatile <2 x i64> <i64 281479271743489, i64 281479271743489>, <2 x i64>*@v2i64
-  store volatile <2 x i64> <i64 4294967297, i64 4294967297>, <2 x i64>*@v2i64
-  store volatile <2 x i64> <i64 1, i64 1>, <2 x i64>*@v2i64
-  store volatile <2 x i64> <i64 1, i64 31>, <2 x i64>*@v2i64
-  store volatile <2 x i64> <i64 3, i64 4>, <2 x i64>*@v2i64
+  store volatile <2 x i64> <i64 0, i64 0>, ptr @v2i64
+  store volatile <2 x i64> <i64 72340172838076673, i64 72340172838076673>, ptr @v2i64
+  store volatile <2 x i64> <i64 281479271743489, i64 281479271743489>, ptr @v2i64
+  store volatile <2 x i64> <i64 4294967297, i64 4294967297>, ptr @v2i64
+  store volatile <2 x i64> <i64 1, i64 1>, ptr @v2i64
+  store volatile <2 x i64> <i64 1, i64 31>, ptr @v2i64
+  store volatile <2 x i64> <i64 3, i64 4>, ptr @v2i64
   ret void
 }
 
@@ -764,7 +764,7 @@ define void @nonconst_v16i8(i8 signext %a, i8 signext %b, i8 signext %c, i8 sign
   %14 = insertelement <16 x i8> %13, i8 %h, i32 13
   %15 = insertelement <16 x i8> %14, i8 %h, i32 14
   %16 = insertelement <16 x i8> %15, i8 %h, i32 15
-  store volatile <16 x i8> %16, <16 x i8>*@v16i8
+  store volatile <16 x i8> %16, ptr @v16i8
   ret void
 }
 
@@ -831,7 +831,7 @@ define void @nonconst_v8i16(i16 signext %a, i16 signext %b, i16 signext %c, i16
   %6 = insertelement <8 x i16> %5, i16 %f, i32 5
   %7 = insertelement <8 x i16> %6, i16 %g, i32 6
   %8 = insertelement <8 x i16> %7, i16 %h, i32 7
-  store volatile <8 x i16> %8, <8 x i16>*@v8i16
+  store volatile <8 x i16> %8, ptr @v8i16
   ret void
 }
 
@@ -878,7 +878,7 @@ define void @nonconst_v4i32(i32 signext %a, i32 signext %b, i32 signext %c, i32
   %2 = insertelement <4 x i32> %1, i32 %b, i32 1
   %3 = insertelement <4 x i32> %2, i32 %c, i32 2
   %4 = insertelement <4 x i32> %3, i32 %d, i32 3
-  store volatile <4 x i32> %4, <4 x i32>*@v4i32
+  store volatile <4 x i32> %4, ptr @v4i32
   ret void
 }
 
@@ -919,7 +919,7 @@ define void @nonconst_v2i64(i64 signext %a, i64 signext %b) nounwind {
 ; N64-NEXT:    st.d $w0, 0($1)
   %1 = insertelement <2 x i64> undef, i64 %a, i32 0
   %2 = insertelement <2 x i64> %1, i64 %b, i32 1
-  store volatile <2 x i64> %2, <2 x i64>*@v2i64
+  store volatile <2 x i64> %2, ptr @v2i64
   ret void
 }
 
@@ -959,7 +959,7 @@ define i32 @extract_sext_v16i8() nounwind {
 ; N64-NEXT:    copy_s.b $1, $w0[1]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    seb $2, $1
-  %1 = load <16 x i8>, <16 x i8>* @v16i8
+  %1 = load <16 x i8>, ptr @v16i8
   %2 = add <16 x i8> %1, %1
   %3 = extractelement <16 x i8> %2, i32 1
   %4 = sext i8 %3 to i32
@@ -1002,7 +1002,7 @@ define i32 @extract_sext_v8i16() nounwind {
 ; N64-NEXT:    copy_s.h $1, $w0[1]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    seh $2, $1
-  %1 = load <8 x i16>, <8 x i16>* @v8i16
+  %1 = load <8 x i16>, ptr @v8i16
   %2 = add <8 x i16> %1, %1
   %3 = extractelement <8 x i16> %2, i32 1
   %4 = sext i16 %3 to i32
@@ -1042,7 +1042,7 @@ define i32 @extract_sext_v4i32() nounwind {
 ; N64-NEXT:    addv.w $w0, $w0, $w0
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    copy_s.w $2, $w0[1]
-  %1 = load <4 x i32>, <4 x i32>* @v4i32
+  %1 = load <4 x i32>, ptr @v4i32
   %2 = add <4 x i32> %1, %1
   %3 = extractelement <4 x i32> %2, i32 1
   ret i32 %3
@@ -1095,7 +1095,7 @@ define i64 @extract_sext_v2i64() nounwind {
 ; N64-NEXT:    addv.d $w0, $w0, $w0
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    copy_s.d $2, $w0[1]
-  %1 = load <2 x i64>, <2 x i64>* @v2i64
+  %1 = load <2 x i64>, ptr @v2i64
   %2 = add <2 x i64> %1, %1
   %3 = extractelement <2 x i64> %2, i32 1
   ret i64 %3
@@ -1134,7 +1134,7 @@ define i32 @extract_zext_v16i8() nounwind {
 ; N64-NEXT:    addv.b $w0, $w0, $w0
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    copy_u.b $2, $w0[1]
-  %1 = load <16 x i8>, <16 x i8>* @v16i8
+  %1 = load <16 x i8>, ptr @v16i8
   %2 = add <16 x i8> %1, %1
   %3 = extractelement <16 x i8> %2, i32 1
   %4 = zext i8 %3 to i32
@@ -1174,7 +1174,7 @@ define i32 @extract_zext_v8i16() nounwind {
 ; N64-NEXT:    addv.h $w0, $w0, $w0
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    copy_u.h $2, $w0[1]
-  %1 = load <8 x i16>, <8 x i16>* @v8i16
+  %1 = load <8 x i16>, ptr @v8i16
   %2 = add <8 x i16> %1, %1
   %3 = extractelement <8 x i16> %2, i32 1
   %4 = zext i16 %3 to i32
@@ -1214,7 +1214,7 @@ define i32 @extract_zext_v4i32() nounwind {
 ; N64-NEXT:    addv.w $w0, $w0, $w0
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    copy_s.w $2, $w0[1]
-  %1 = load <4 x i32>, <4 x i32>* @v4i32
+  %1 = load <4 x i32>, ptr @v4i32
   %2 = add <4 x i32> %1, %1
   %3 = extractelement <4 x i32> %2, i32 1
   ret i32 %3
@@ -1267,7 +1267,7 @@ define i64 @extract_zext_v2i64() nounwind {
 ; N64-NEXT:    addv.d $w0, $w0, $w0
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    copy_s.d $2, $w0[1]
-  %1 = load <2 x i64>, <2 x i64>* @v2i64
+  %1 = load <2 x i64>, ptr @v2i64
   %2 = add <2 x i64> %1, %1
   %3 = extractelement <2 x i64> %2, i32 1
   ret i64 %3
@@ -1321,9 +1321,9 @@ define i32 @extract_sext_v16i8_vidx() nounwind {
 ; N64-NEXT:    sra $1, $1, 24
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    seb $2, $1
-  %1 = load <16 x i8>, <16 x i8>* @v16i8
+  %1 = load <16 x i8>, ptr @v16i8
   %2 = add <16 x i8> %1, %1
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   %4 = extractelement <16 x i8> %2, i32 %3
   %5 = sext i8 %4 to i32
   ret i32 %5
@@ -1377,9 +1377,9 @@ define i32 @extract_sext_v8i16_vidx() nounwind {
 ; N64-NEXT:    sra $1, $1, 16
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    seh $2, $1
-  %1 = load <8 x i16>, <8 x i16>* @v8i16
+  %1 = load <8 x i16>, ptr @v8i16
   %2 = add <8 x i16> %1, %1
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   %4 = extractelement <8 x i16> %2, i32 %3
   %5 = sext i16 %4 to i32
   ret i32 %5
@@ -1427,9 +1427,9 @@ define i32 @extract_sext_v4i32_vidx() nounwind {
 ; N64-NEXT:    splat.w $w0, $w0[$1]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    mfc1 $2, $f0
-  %1 = load <4 x i32>, <4 x i32>* @v4i32
+  %1 = load <4 x i32>, ptr @v4i32
   %2 = add <4 x i32> %1, %1
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   %4 = extractelement <4 x i32> %2, i32 %3
   ret i32 %4
 }
@@ -1499,9 +1499,9 @@ define i64 @extract_sext_v2i64_vidx() nounwind {
 ; N64-NEXT:    splat.d $w0, $w0[$1]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    dmfc1 $2, $f0
-  %1 = load <2 x i64>, <2 x i64>* @v2i64
+  %1 = load <2 x i64>, ptr @v2i64
   %2 = add <2 x i64> %1, %1
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   %4 = extractelement <2 x i64> %2, i32 %3
   ret i64 %4
 }
@@ -1551,9 +1551,9 @@ define i32 @extract_zext_v16i8_vidx() nounwind {
 ; N64-NEXT:    mfc1 $1, $f0
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    srl $2, $1, 24
-  %1 = load <16 x i8>, <16 x i8>* @v16i8
+  %1 = load <16 x i8>, ptr @v16i8
   %2 = add <16 x i8> %1, %1
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   %4 = extractelement <16 x i8> %2, i32 %3
   %5 = zext i8 %4 to i32
   ret i32 %5
@@ -1604,9 +1604,9 @@ define i32 @extract_zext_v8i16_vidx() nounwind {
 ; N64-NEXT:    mfc1 $1, $f0
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    srl $2, $1, 16
-  %1 = load <8 x i16>, <8 x i16>* @v8i16
+  %1 = load <8 x i16>, ptr @v8i16
   %2 = add <8 x i16> %1, %1
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   %4 = extractelement <8 x i16> %2, i32 %3
   %5 = zext i16 %4 to i32
   ret i32 %5
@@ -1654,9 +1654,9 @@ define i32 @extract_zext_v4i32_vidx() nounwind {
 ; N64-NEXT:    splat.w $w0, $w0[$1]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    mfc1 $2, $f0
-  %1 = load <4 x i32>, <4 x i32>* @v4i32
+  %1 = load <4 x i32>, ptr @v4i32
   %2 = add <4 x i32> %1, %1
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   %4 = extractelement <4 x i32> %2, i32 %3
   ret i32 %4
 }
@@ -1726,9 +1726,9 @@ define i64 @extract_zext_v2i64_vidx() nounwind {
 ; N64-NEXT:    splat.d $w0, $w0[$1]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    dmfc1 $2, $f0
-  %1 = load <2 x i64>, <2 x i64>* @v2i64
+  %1 = load <2 x i64>, ptr @v2i64
   %2 = add <2 x i64> %1, %1
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   %4 = extractelement <2 x i64> %2, i32 %3
   ret i64 %4
 }
@@ -1766,12 +1766,12 @@ define void @insert_v16i8(i32 signext %a) nounwind {
 ; N64-NEXT:    insert.b $w0[1], $4
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    st.b $w0, 0($1)
-  %1 = load <16 x i8>, <16 x i8>* @v16i8
+  %1 = load <16 x i8>, ptr @v16i8
   %a2 = trunc i32 %a to i8
   %a3 = sext i8 %a2 to i32
   %a4 = trunc i32 %a3 to i8
   %2 = insertelement <16 x i8> %1, i8 %a4, i32 1
-  store <16 x i8> %2, <16 x i8>* @v16i8
+  store <16 x i8> %2, ptr @v16i8
   ret void
 }
 
@@ -1808,12 +1808,12 @@ define void @insert_v8i16(i32 signext %a) nounwind {
 ; N64-NEXT:    insert.h $w0[1], $4
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    st.h $w0, 0($1)
-  %1 = load <8 x i16>, <8 x i16>* @v8i16
+  %1 = load <8 x i16>, ptr @v8i16
   %a2 = trunc i32 %a to i16
   %a3 = sext i16 %a2 to i32
   %a4 = trunc i32 %a3 to i16
   %2 = insertelement <8 x i16> %1, i16 %a4, i32 1
-  store <8 x i16> %2, <8 x i16>* @v8i16
+  store <8 x i16> %2, ptr @v8i16
   ret void
 }
 
@@ -1850,9 +1850,9 @@ define void @insert_v4i32(i32 signext %a) nounwind {
 ; N64-NEXT:    insert.w $w0[1], $4
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    st.w $w0, 0($1)
-  %1 = load <4 x i32>, <4 x i32>* @v4i32
+  %1 = load <4 x i32>, ptr @v4i32
   %2 = insertelement <4 x i32> %1, i32 %a, i32 1
-  store <4 x i32> %2, <4 x i32>* @v4i32
+  store <4 x i32> %2, ptr @v4i32
   ret void
 }
 define void @insert_v2i64(i64 signext %a) nounwind {
@@ -1889,9 +1889,9 @@ define void @insert_v2i64(i64 signext %a) nounwind {
 ; N64-NEXT:    insert.d $w0[1], $4
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    st.d $w0, 0($1)
-  %1 = load <2 x i64>, <2 x i64>* @v2i64
+  %1 = load <2 x i64>, ptr @v2i64
   %2 = insertelement <2 x i64> %1, i64 %a, i32 1
-  store <2 x i64> %2, <2 x i64>* @v2i64
+  store <2 x i64> %2, ptr @v2i64
   ret void
 }
 
@@ -1943,13 +1943,13 @@ define void @insert_v16i8_vidx(i32 signext %a) nounwind {
 ; N64-NEXT:    sld.b $w0, $w0[$2]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    st.b $w0, 0($1)
-  %1 = load <16 x i8>, <16 x i8>* @v16i8
-  %2 = load i32, i32* @i32
+  %1 = load <16 x i8>, ptr @v16i8
+  %2 = load i32, ptr @i32
   %a2 = trunc i32 %a to i8
   %a3 = sext i8 %a2 to i32
   %a4 = trunc i32 %a3 to i8
   %3 = insertelement <16 x i8> %1, i8 %a4, i32 %2
-  store <16 x i8> %3, <16 x i8>* @v16i8
+  store <16 x i8> %3, ptr @v16i8
   ret void
 }
 
@@ -2004,13 +2004,13 @@ define void @insert_v8i16_vidx(i32 signext %a) nounwind {
 ; N64-NEXT:    sld.b $w0, $w0[$2]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    st.h $w0, 0($1)
-  %1 = load <8 x i16>, <8 x i16>* @v8i16
-  %2 = load i32, i32* @i32
+  %1 = load <8 x i16>, ptr @v8i16
+  %2 = load i32, ptr @i32
   %a2 = trunc i32 %a to i16
   %a3 = sext i16 %a2 to i32
   %a4 = trunc i32 %a3 to i16
   %3 = insertelement <8 x i16> %1, i16 %a4, i32 %2
-  store <8 x i16> %3, <8 x i16>* @v8i16
+  store <8 x i16> %3, ptr @v8i16
   ret void
 }
 
@@ -2065,10 +2065,10 @@ define void @insert_v4i32_vidx(i32 signext %a) nounwind {
 ; N64-NEXT:    sld.b $w0, $w0[$2]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    st.w $w0, 0($1)
-  %1 = load <4 x i32>, <4 x i32>* @v4i32
-  %2 = load i32, i32* @i32
+  %1 = load <4 x i32>, ptr @v4i32
+  %2 = load i32, ptr @i32
   %3 = insertelement <4 x i32> %1, i32 %a, i32 %2
-  store <4 x i32> %3, <4 x i32>* @v4i32
+  store <4 x i32> %3, ptr @v4i32
   ret void
 }
 
@@ -2134,10 +2134,10 @@ define void @insert_v2i64_vidx(i64 signext %a) nounwind {
 ; N64-NEXT:    sld.b $w0, $w0[$2]
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    st.d $w0, 0($1)
-  %1 = load <2 x i64>, <2 x i64>* @v2i64
-  %2 = load i32, i32* @i32
+  %1 = load <2 x i64>, ptr @v2i64
+  %2 = load i32, ptr @i32
   %3 = insertelement <2 x i64> %1, i64 %a, i32 %2
-  store <2 x i64> %3, <2 x i64>* @v2i64
+  store <2 x i64> %3, ptr @v2i64
   ret void
 }
 
@@ -2181,6 +2181,6 @@ define void @truncstore() nounwind {
 ; N64-NEXT:    sb $2, 1($1)
 ; N64-NEXT:    jr $ra
 ; N64-NEXT:    sb $2, 0($1)
-  store volatile <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i8>*@v4i8
+  store volatile <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, ptr @v4i8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/basic_operations_float.ll b/llvm/test/CodeGen/Mips/msa/basic_operations_float.ll
index 1359eb2ecdd6c..740e6169f81f9 100644
--- a/llvm/test/CodeGen/Mips/msa/basic_operations_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/basic_operations_float.ll
@@ -20,31 +20,31 @@
 define void @const_v4f32() nounwind {
   ; ALL-LABEL: const_v4f32:
 
-  store volatile <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, <4 x float>*@v4f32
+  store volatile <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, ptr @v4f32
   ; ALL: ldi.b  [[R1:\$w[0-9]+]], 0
 
-  store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, <4 x float>*@v4f32
+  store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, ptr @v4f32
   ; ALL: lui     [[R1:\$[0-9]+]], 16256
   ; ALL: fill.w  [[R2:\$w[0-9]+]], [[R1]]
 
-  store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, <4 x float>*@v4f32
+  store volatile <4 x float> <float 1.0, float 1.0, float 1.0, float 31.0>, ptr @v4f32
   ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
   ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; ALL: ld.w  [[R1:\$w[0-9]+]], 0([[G_PTR]])
 
-  store volatile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, <4 x float>*@v4f32
+  store volatile <4 x float> <float 65537.0, float 65537.0, float 65537.0, float 65537.0>, ptr @v4f32
   ; ALL: lui     [[R1:\$[0-9]+]], 18304
   ; ALL: ori     [[R2:\$[0-9]+]], [[R1]], 128
   ; ALL: fill.w  [[R3:\$w[0-9]+]], [[R2]]
 
-  store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, <4 x float>*@v4f32
+  store volatile <4 x float> <float 1.0, float 2.0, float 1.0, float 2.0>, ptr @v4f32
   ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
   ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; ALL: ld.w  [[R1:\$w[0-9]+]], 0([[G_PTR]])
 
-  store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, <4 x float>*@v4f32
+  store volatile <4 x float> <float 3.0, float 4.0, float 5.0, float 6.0>, ptr @v4f32
   ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
   ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
@@ -56,40 +56,40 @@ define void @const_v4f32() nounwind {
 define void @const_v2f64() nounwind {
   ; ALL-LABEL: const_v2f64:
 
-  store volatile <2 x double> <double 0.0, double 0.0>, <2 x double>*@v2f64
+  store volatile <2 x double> <double 0.0, double 0.0>, ptr @v2f64
   ; ALL: ldi.b  [[R1:\$w[0-9]+]], 0
 
-  store volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, <2 x double>*@v2f64
+  store volatile <2 x double> <double 72340172838076673.0, double 72340172838076673.0>, ptr @v2f64
   ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
   ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; ALL: ld.d  [[R1:\$w[0-9]+]], 0([[G_PTR]])
 
-  store volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, <2 x double>*@v2f64
+  store volatile <2 x double> <double 281479271743489.0, double 281479271743489.0>, ptr @v2f64
   ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
   ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; ALL: ld.d  [[R1:\$w[0-9]+]], 0([[G_PTR]])
 
-  store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, <2 x double>*@v2f64
+  store volatile <2 x double> <double 4294967297.0, double 4294967297.0>, ptr @v2f64
   ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
   ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; ALL: ld.d  [[R1:\$w[0-9]+]], 0([[G_PTR]])
 
-  store volatile <2 x double> <double 1.0, double 1.0>, <2 x double>*@v2f64
+  store volatile <2 x double> <double 1.0, double 1.0>, ptr @v2f64
   ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
   ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; ALL: ld.d  [[R1:\$w[0-9]+]], 0([[G_PTR]])
 
-  store volatile <2 x double> <double 1.0, double 31.0>, <2 x double>*@v2f64
+  store volatile <2 x double> <double 1.0, double 31.0>, ptr @v2f64
   ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
   ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; ALL: ld.d  [[R1:\$w[0-9]+]], 0([[G_PTR]])
 
-  store volatile <2 x double> <double 3.0, double 4.0>, <2 x double>*@v2f64
+  store volatile <2 x double> <double 3.0, double 4.0>, ptr @v2f64
   ; O32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %lo($
   ; N32: addiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
   ; N64: daddiu [[G_PTR:\$[0-9]+]], {{.*}}, %got_ofst(.L
@@ -101,12 +101,12 @@ define void @const_v2f64() nounwind {
 define void @nonconst_v4f32() nounwind {
   ; ALL-LABEL: nonconst_v4f32:
 
-  %1 = load float , float *@f32
+  %1 = load float , ptr @f32
   %2 = insertelement <4 x float> undef, float %1, i32 0
   %3 = insertelement <4 x float> %2, float %1, i32 1
   %4 = insertelement <4 x float> %3, float %1, i32 2
   %5 = insertelement <4 x float> %4, float %1, i32 3
-  store volatile <4 x float> %5, <4 x float>*@v4f32
+  store volatile <4 x float> %5, ptr @v4f32
   ; ALL: lwc1 $f[[R1:[0-9]+]], 0(
   ; ALL: splati.w [[R2:\$w[0-9]+]], $w[[R1]]
 
@@ -116,10 +116,10 @@ define void @nonconst_v4f32() nounwind {
 define void @nonconst_v2f64() nounwind {
   ; ALL-LABEL: nonconst_v2f64:
 
-  %1 = load double , double *@f64
+  %1 = load double , ptr @f64
   %2 = insertelement <2 x double> undef, double %1, i32 0
   %3 = insertelement <2 x double> %2, double %1, i32 1
-  store volatile <2 x double> %3, <2 x double>*@v2f64
+  store volatile <2 x double> %3, ptr @v2f64
   ; ALL: ldc1 $f[[R1:[0-9]+]], 0(
   ; ALL: splati.d [[R2:\$w[0-9]+]], $w[[R1]]
 
@@ -129,7 +129,7 @@ define void @nonconst_v2f64() nounwind {
 define float @extract_v4f32() nounwind {
   ; ALL-LABEL: extract_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* @v4f32
+  %1 = load <4 x float>, ptr @v4f32
   ; ALL-DAG: ld.w [[R1:\$w[0-9]+]],
 
   %2 = fadd <4 x float> %1, %1
@@ -146,7 +146,7 @@ define float @extract_v4f32() nounwind {
 define float @extract_v4f32_elt0() nounwind {
   ; ALL-LABEL: extract_v4f32_elt0:
 
-  %1 = load <4 x float>, <4 x float>* @v4f32
+  %1 = load <4 x float>, ptr @v4f32
   ; ALL-DAG: ld.w [[R1:\$w[0-9]+]],
 
   %2 = fadd <4 x float> %1, %1
@@ -163,7 +163,7 @@ define float @extract_v4f32_elt0() nounwind {
 define float @extract_v4f32_elt2() nounwind {
   ; ALL-LABEL: extract_v4f32_elt2:
 
-  %1 = load <4 x float>, <4 x float>* @v4f32
+  %1 = load <4 x float>, ptr @v4f32
   ; ALL-DAG: ld.w [[R1:\$w[0-9]+]],
 
   %2 = fadd <4 x float> %1, %1
@@ -180,7 +180,7 @@ define float @extract_v4f32_elt2() nounwind {
 define float @extract_v4f32_vidx() nounwind {
   ; ALL-LABEL: extract_v4f32_vidx:
 
-  %1 = load <4 x float>, <4 x float>* @v4f32
+  %1 = load <4 x float>, ptr @v4f32
   ; O32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
   ; N32-DAG: lw [[PTR_V:\$[0-9]+]], %got_disp(v4f32)(
   ; N64-DAG: ld [[PTR_V:\$[0-9]+]], %got_disp(v4f32)(
@@ -189,7 +189,7 @@ define float @extract_v4f32_vidx() nounwind {
   %2 = fadd <4 x float> %1, %1
   ; ALL-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
 
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   ; O32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; N32-DAG: lw [[PTR_I:\$[0-9]+]], %got_disp(i32)(
   ; N64-DAG: ld [[PTR_I:\$[0-9]+]], %got_disp(i32)(
@@ -203,7 +203,7 @@ define float @extract_v4f32_vidx() nounwind {
 define double @extract_v2f64() nounwind {
   ; ALL-LABEL: extract_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* @v2f64
+  %1 = load <2 x double>, ptr @v2f64
   ; ALL-DAG: ld.d [[R1:\$w[0-9]+]],
 
   %2 = fadd <2 x double> %1, %1
@@ -225,7 +225,7 @@ define double @extract_v2f64() nounwind {
 define double @extract_v2f64_elt0() nounwind {
   ; ALL-LABEL: extract_v2f64_elt0:
 
-  %1 = load <2 x double>, <2 x double>* @v2f64
+  %1 = load <2 x double>, ptr @v2f64
   ; ALL-DAG: ld.d [[R1:\$w[0-9]+]],
 
   %2 = fadd <2 x double> %1, %1
@@ -245,7 +245,7 @@ define double @extract_v2f64_elt0() nounwind {
 define double @extract_v2f64_vidx() nounwind {
   ; ALL-LABEL: extract_v2f64_vidx:
 
-  %1 = load <2 x double>, <2 x double>* @v2f64
+  %1 = load <2 x double>, ptr @v2f64
   ; O32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
   ; N32-DAG: lw [[PTR_V:\$[0-9]+]], %got_disp(v2f64)(
   ; N64-DAG: ld [[PTR_V:\$[0-9]+]], %got_disp(v2f64)(
@@ -254,7 +254,7 @@ define double @extract_v2f64_vidx() nounwind {
   %2 = fadd <2 x double> %1, %1
   ; ALL-DAG: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
 
-  %3 = load i32, i32* @i32
+  %3 = load i32, ptr @i32
   ; O32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; N32-DAG: lw [[PTR_I:\$[0-9]+]], %got_disp(i32)(
   ; N64-DAG: ld [[PTR_I:\$[0-9]+]], %got_disp(i32)(
@@ -268,14 +268,14 @@ define double @extract_v2f64_vidx() nounwind {
 define void @insert_v4f32(float %a) nounwind {
   ; ALL-LABEL: insert_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* @v4f32
+  %1 = load <4 x float>, ptr @v4f32
   ; ALL-DAG: ld.w [[R1:\$w[0-9]+]],
 
   %2 = insertelement <4 x float> %1, float %a, i32 1
   ; float argument passed in $f12
   ; ALL-DAG: insve.w [[R1]][1], $w12[0]
 
-  store <4 x float> %2, <4 x float>* @v4f32
+  store <4 x float> %2, ptr @v4f32
   ; ALL-DAG: st.w [[R1]]
 
   ret void
@@ -284,14 +284,14 @@ define void @insert_v4f32(float %a) nounwind {
 define void @insert_v2f64(double %a) nounwind {
   ; ALL-LABEL: insert_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* @v2f64
+  %1 = load <2 x double>, ptr @v2f64
   ; ALL-DAG: ld.d [[R1:\$w[0-9]+]],
 
   %2 = insertelement <2 x double> %1, double %a, i32 1
   ; double argument passed in $f12
   ; ALL-DAG: insve.d [[R1]][1], $w12[0]
 
-  store <2 x double> %2, <2 x double>* @v2f64
+  store <2 x double> %2, ptr @v2f64
   ; ALL-DAG: st.d [[R1]]
 
   ret void
@@ -300,13 +300,13 @@ define void @insert_v2f64(double %a) nounwind {
 define void @insert_v4f32_vidx(float %a) nounwind {
   ; ALL-LABEL: insert_v4f32_vidx:
 
-  %1 = load <4 x float>, <4 x float>* @v4f32
+  %1 = load <4 x float>, ptr @v4f32
   ; O32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
   ; N32-DAG: lw [[PTR_V:\$[0-9]+]], %got_disp(v4f32)(
   ; N64-DAG: ld [[PTR_V:\$[0-9]+]], %got_disp(v4f32)(
   ; ALL-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
 
-  %2 = load i32, i32* @i32
+  %2 = load i32, ptr @i32
   ; O32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; N32-DAG: lw [[PTR_I:\$[0-9]+]], %got_disp(i32)(
   ; N64-DAG: ld [[PTR_I:\$[0-9]+]], %got_disp(i32)(
@@ -319,7 +319,7 @@ define void @insert_v4f32_vidx(float %a) nounwind {
   ; ALL-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
   ; ALL-DAG: sld.b [[R1]], [[R1]][[[NIDX]]]
 
-  store <4 x float> %3, <4 x float>* @v4f32
+  store <4 x float> %3, ptr @v4f32
   ; ALL-DAG: st.w [[R1]]
 
   ret void
@@ -328,13 +328,13 @@ define void @insert_v4f32_vidx(float %a) nounwind {
 define void @insert_v2f64_vidx(double %a) nounwind {
   ; ALL-LABEL: insert_v2f64_vidx:
 
-  %1 = load <2 x double>, <2 x double>* @v2f64
+  %1 = load <2 x double>, ptr @v2f64
   ; O32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
   ; N32-DAG: lw [[PTR_V:\$[0-9]+]], %got_disp(v2f64)(
   ; N64-DAG: ld [[PTR_V:\$[0-9]+]], %got_disp(v2f64)(
   ; ALL-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
 
-  %2 = load i32, i32* @i32
+  %2 = load i32, ptr @i32
   ; O32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; N32-DAG: lw [[PTR_I:\$[0-9]+]], %got_disp(i32)(
   ; N64-DAG: ld [[PTR_I:\$[0-9]+]], %got_disp(i32)(
@@ -347,7 +347,7 @@ define void @insert_v2f64_vidx(double %a) nounwind {
   ; ALL-DAG: neg [[NIDX:\$[0-9]+]], [[BIDX]]
   ; ALL-DAG: sld.b [[R1]], [[R1]][[[NIDX]]]
 
-  store <2 x double> %3, <2 x double>* @v2f64
+  store <2 x double> %3, ptr @v2f64
   ; ALL-DAG: st.d [[R1]]
 
   ret void

diff  --git a/llvm/test/CodeGen/Mips/msa/bit.ll b/llvm/test/CodeGen/Mips/msa/bit.ll
index 2a4632f0807cd..1b2012cec5f5a 100644
--- a/llvm/test/CodeGen/Mips/msa/bit.ll
+++ b/llvm/test/CodeGen/Mips/msa/bit.ll
@@ -8,9 +8,9 @@
 
 define void @llvm_mips_sat_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_s_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_sat_s_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.sat.s.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_s_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_sat_s_b_RES
   ret void
 }
 
@@ -27,9 +27,9 @@ declare <16 x i8> @llvm.mips.sat.s.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_sat_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_s_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_sat_s_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.sat.s.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_s_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_sat_s_h_RES
   ret void
 }
 
@@ -46,9 +46,9 @@ declare <8 x i16> @llvm.mips.sat.s.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_sat_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_s_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_sat_s_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.sat.s.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_s_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_sat_s_w_RES
   ret void
 }
 
@@ -65,9 +65,9 @@ declare <4 x i32> @llvm.mips.sat.s.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_sat_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_s_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_sat_s_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.sat.s.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_s_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_sat_s_d_RES
   ret void
 }
 
@@ -84,9 +84,9 @@ declare <2 x i64> @llvm.mips.sat.s.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_sat_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_u_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_sat_u_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.sat.u.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_u_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_sat_u_b_RES
   ret void
 }
 
@@ -103,9 +103,9 @@ declare <16 x i8> @llvm.mips.sat.u.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_sat_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_u_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_sat_u_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.sat.u.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_u_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_sat_u_h_RES
   ret void
 }
 
@@ -122,9 +122,9 @@ declare <8 x i16> @llvm.mips.sat.u.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_sat_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_u_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_sat_u_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.sat.u.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_u_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_sat_u_w_RES
   ret void
 }
 
@@ -141,9 +141,9 @@ declare <4 x i32> @llvm.mips.sat.u.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_sat_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_u_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_sat_u_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.sat.u.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_u_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_sat_u_d_RES
   ret void
 }
 
@@ -160,9 +160,9 @@ declare <2 x i64> @llvm.mips.sat.u.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_slli_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_slli_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_slli_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.slli.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_slli_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_slli_b_RES
   ret void
 }
 
@@ -179,9 +179,9 @@ declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_slli_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_slli_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_slli_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.slli.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_slli_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_slli_h_RES
   ret void
 }
 
@@ -198,9 +198,9 @@ declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_slli_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_slli_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_slli_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_slli_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_slli_w_RES
   ret void
 }
 
@@ -217,9 +217,9 @@ declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_slli_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_slli_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_slli_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_slli_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_slli_d_RES
   ret void
 }
 
@@ -236,9 +236,9 @@ declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_srai_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srai_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_srai_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.srai.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_srai_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_srai_b_RES
   ret void
 }
 
@@ -255,9 +255,9 @@ declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_srai_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srai_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_srai_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.srai.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_srai_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_srai_h_RES
   ret void
 }
 
@@ -274,9 +274,9 @@ declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_srai_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srai_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_srai_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.srai.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_srai_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_srai_w_RES
   ret void
 }
 
@@ -293,9 +293,9 @@ declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_srai_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srai_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_srai_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.srai.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_srai_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_srai_d_RES
   ret void
 }
 
@@ -312,9 +312,9 @@ declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_srari_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srari_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_srari_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.srari.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_srari_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_srari_b_RES
   ret void
 }
 
@@ -331,9 +331,9 @@ declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_srari_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srari_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_srari_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.srari.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_srari_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_srari_h_RES
   ret void
 }
 
@@ -350,9 +350,9 @@ declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_srari_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srari_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_srari_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.srari.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_srari_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_srari_w_RES
   ret void
 }
 
@@ -369,9 +369,9 @@ declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_srari_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srari_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_srari_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.srari.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_srari_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_srari_d_RES
   ret void
 }
 
@@ -388,9 +388,9 @@ declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_srli_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srli_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_srli_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.srli.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_srli_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_srli_b_RES
   ret void
 }
 
@@ -407,9 +407,9 @@ declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_srli_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srli_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_srli_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.srli.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_srli_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_srli_h_RES
   ret void
 }
 
@@ -426,9 +426,9 @@ declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_srli_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srli_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_srli_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_srli_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_srli_w_RES
   ret void
 }
 
@@ -445,9 +445,9 @@ declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_srli_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srli_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_srli_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_srli_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_srli_d_RES
   ret void
 }
 
@@ -464,9 +464,9 @@ declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_srlri_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlri_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_srlri_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_srlri_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_srlri_b_RES
   ret void
 }
 
@@ -483,9 +483,9 @@ declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_srlri_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlri_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_srlri_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_srlri_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_srlri_h_RES
   ret void
 }
 
@@ -502,9 +502,9 @@ declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_srlri_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlri_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_srlri_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_srlri_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_srlri_w_RES
   ret void
 }
 
@@ -521,9 +521,9 @@ declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_srlri_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlri_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_srlri_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_srlri_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_srlri_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/bitcast.ll b/llvm/test/CodeGen/Mips/msa/bitcast.ll
index a816b9b84edd6..11c5a5fb42c79 100644
--- a/llvm/test/CodeGen/Mips/msa/bitcast.ll
+++ b/llvm/test/CodeGen/Mips/msa/bitcast.ll
@@ -3,13 +3,13 @@
 ; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck -check-prefix=BIGENDIAN %s
 ; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck -check-prefix=LITENDIAN %s
 
-define void @v16i8_to_v16i8(<16 x i8>* %src, <16 x i8>* %dst) nounwind {
+define void @v16i8_to_v16i8(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <16 x i8>, <16 x i8>* %src
+  %0 = load volatile <16 x i8>, ptr %src
   %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
   %2 = bitcast <16 x i8> %1 to <16 x i8>
   %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* %dst
+  store <16 x i8> %3, ptr %dst
   ret void
 }
 
@@ -27,13 +27,13 @@ entry:
 ; BIGENDIAN: st.b [[R3]],
 ; BIGENDIAN: .size v16i8_to_v16i8
 
-define void @v16i8_to_v8i16(<16 x i8>* %src, <8 x i16>* %dst) nounwind {
+define void @v16i8_to_v8i16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <16 x i8>, <16 x i8>* %src
+  %0 = load volatile <16 x i8>, ptr %src
   %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
   %2 = bitcast <16 x i8> %1 to <8 x i16>
   %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* %dst
+  store <8 x i16> %3, ptr %dst
   ret void
 }
 
@@ -54,12 +54,12 @@ entry:
 
 ; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v16i8_to_v8f16(<16 x i8>* %src, <8 x half>* %dst) nounwind {
+define void @v16i8_to_v8f16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <16 x i8>, <16 x i8>* %src
+  %0 = load volatile <16 x i8>, ptr %src
   %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
   %2 = bitcast <16 x i8> %1 to <8 x half>
-  store <8 x half> %2, <8 x half>* %dst
+  store <8 x half> %2, ptr %dst
   ret void
 }
 
@@ -75,13 +75,13 @@ entry:
 ; BIGENDIAN: st.b [[R2]],
 ; BIGENDIAN: .size v16i8_to_v8f16
 
-define void @v16i8_to_v4i32(<16 x i8>* %src, <4 x i32>* %dst) nounwind {
+define void @v16i8_to_v4i32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <16 x i8>, <16 x i8>* %src
+  %0 = load volatile <16 x i8>, ptr %src
   %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
   %2 = bitcast <16 x i8> %1 to <4 x i32>
   %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* %dst
+  store <4 x i32> %3, ptr %dst
   ret void
 }
 
@@ -100,13 +100,13 @@ entry:
 ; BIGENDIAN: st.w [[R4]],
 ; BIGENDIAN: .size v16i8_to_v4i32
 
-define void @v16i8_to_v4f32(<16 x i8>* %src, <4 x float>* %dst) nounwind {
+define void @v16i8_to_v4f32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <16 x i8>, <16 x i8>* %src
+  %0 = load volatile <16 x i8>, ptr %src
   %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
   %2 = bitcast <16 x i8> %1 to <4 x float>
   %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
-  store <4 x float> %3, <4 x float>* %dst
+  store <4 x float> %3, ptr %dst
   ret void
 }
 
@@ -125,13 +125,13 @@ entry:
 ; BIGENDIAN: st.w [[R4]],
 ; BIGENDIAN: .size v16i8_to_v4f32
 
-define void @v16i8_to_v2i64(<16 x i8>* %src, <2 x i64>* %dst) nounwind {
+define void @v16i8_to_v2i64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <16 x i8>, <16 x i8>* %src
+  %0 = load volatile <16 x i8>, ptr %src
   %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
   %2 = bitcast <16 x i8> %1 to <2 x i64>
   %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* %dst
+  store <2 x i64> %3, ptr %dst
   ret void
 }
 
@@ -151,13 +151,13 @@ entry:
 ; BIGENDIAN: st.d [[R4]],
 ; BIGENDIAN: .size v16i8_to_v2i64
 
-define void @v16i8_to_v2f64(<16 x i8>* %src, <2 x double>* %dst) nounwind {
+define void @v16i8_to_v2f64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <16 x i8>, <16 x i8>* %src
+  %0 = load volatile <16 x i8>, ptr %src
   %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
   %2 = bitcast <16 x i8> %1 to <2 x double>
   %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
-  store <2 x double> %3, <2 x double>* %dst
+  store <2 x double> %3, ptr %dst
   ret void
 }
 
@@ -177,13 +177,13 @@ entry:
 ; BIGENDIAN: st.d [[R4]],
 ; BIGENDIAN: .size v16i8_to_v2f64
 
-define void @v8i16_to_v16i8(<8 x i16>* %src, <16 x i8>* %dst) nounwind {
+define void @v8i16_to_v16i8(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x i16>, <8 x i16>* %src
+  %0 = load volatile <8 x i16>, ptr %src
   %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
   %2 = bitcast <8 x i16> %1 to <16 x i8>
   %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* %dst
+  store <16 x i8> %3, ptr %dst
   ret void
 }
 
@@ -202,13 +202,13 @@ entry:
 ; BIGENDIAN: st.b [[R4]],
 ; BIGENDIAN: .size v8i16_to_v16i8
 
-define void @v8i16_to_v8i16(<8 x i16>* %src, <8 x i16>* %dst) nounwind {
+define void @v8i16_to_v8i16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x i16>, <8 x i16>* %src
+  %0 = load volatile <8 x i16>, ptr %src
   %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
   %2 = bitcast <8 x i16> %1 to <8 x i16>
   %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* %dst
+  store <8 x i16> %3, ptr %dst
   ret void
 }
 
@@ -228,12 +228,12 @@ entry:
 
 ; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v8i16_to_v8f16(<8 x i16>* %src, <8 x half>* %dst) nounwind {
+define void @v8i16_to_v8f16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x i16>, <8 x i16>* %src
+  %0 = load volatile <8 x i16>, ptr %src
   %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
   %2 = bitcast <8 x i16> %1 to <8 x half>
-  store <8 x half> %2, <8 x half>* %dst
+  store <8 x half> %2, ptr %dst
   ret void
 }
 
@@ -249,13 +249,13 @@ entry:
 ; BIGENDIAN: st.h [[R2]],
 ; BIGENDIAN: .size v8i16_to_v8f16
 
-define void @v8i16_to_v4i32(<8 x i16>* %src, <4 x i32>* %dst) nounwind {
+define void @v8i16_to_v4i32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x i16>, <8 x i16>* %src
+  %0 = load volatile <8 x i16>, ptr %src
   %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
   %2 = bitcast <8 x i16> %1 to <4 x i32>
   %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* %dst
+  store <4 x i32> %3, ptr %dst
   ret void
 }
 
@@ -274,13 +274,13 @@ entry:
 ; BIGENDIAN: st.w [[R4]],
 ; BIGENDIAN: .size v8i16_to_v4i32
 
-define void @v8i16_to_v4f32(<8 x i16>* %src, <4 x float>* %dst) nounwind {
+define void @v8i16_to_v4f32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x i16>, <8 x i16>* %src
+  %0 = load volatile <8 x i16>, ptr %src
   %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
   %2 = bitcast <8 x i16> %1 to <4 x float>
   %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
-  store <4 x float> %3, <4 x float>* %dst
+  store <4 x float> %3, ptr %dst
   ret void
 }
 
@@ -299,13 +299,13 @@ entry:
 ; BIGENDIAN: st.w [[R4]],
 ; BIGENDIAN: .size v8i16_to_v4f32
 
-define void @v8i16_to_v2i64(<8 x i16>* %src, <2 x i64>* %dst) nounwind {
+define void @v8i16_to_v2i64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x i16>, <8 x i16>* %src
+  %0 = load volatile <8 x i16>, ptr %src
   %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
   %2 = bitcast <8 x i16> %1 to <2 x i64>
   %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* %dst
+  store <2 x i64> %3, ptr %dst
   ret void
 }
 
@@ -324,13 +324,13 @@ entry:
 ; BIGENDIAN: st.d [[R4]],
 ; BIGENDIAN: .size v8i16_to_v2i64
 
-define void @v8i16_to_v2f64(<8 x i16>* %src, <2 x double>* %dst) nounwind {
+define void @v8i16_to_v2f64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x i16>, <8 x i16>* %src
+  %0 = load volatile <8 x i16>, ptr %src
   %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
   %2 = bitcast <8 x i16> %1 to <2 x double>
   %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
-  store <2 x double> %3, <2 x double>* %dst
+  store <2 x double> %3, ptr %dst
   ret void
 }
 
@@ -352,12 +352,12 @@ entry:
 ;----
 ; We can't prevent the (bitcast (load X)) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v16i8(<8 x half>* %src, <16 x i8>* %dst) nounwind {
+define void @v8f16_to_v16i8(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x half>, <8 x half>* %src
+  %0 = load volatile <8 x half>, ptr %src
   %1 = bitcast <8 x half> %0 to <16 x i8>
   %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %1, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %dst
+  store <16 x i8> %2, ptr %dst
   ret void
 }
 
@@ -375,12 +375,12 @@ entry:
 
 ; We can't prevent the (bitcast (load X)) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v8i16(<8 x half>* %src, <8 x i16>* %dst) nounwind {
+define void @v8f16_to_v8i16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x half>, <8 x half>* %src
+  %0 = load volatile <8 x half>, ptr %src
   %1 = bitcast <8 x half> %0 to <8 x i16>
   %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %1, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %dst
+  store <8 x i16> %2, ptr %dst
   ret void
 }
 
@@ -400,11 +400,11 @@ entry:
 ; are no operations for v8f16 to put in the way.
 ; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v8f16(<8 x half>* %src, <8 x half>* %dst) nounwind {
+define void @v8f16_to_v8f16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x half>, <8 x half>* %src
+  %0 = load volatile <8 x half>, ptr %src
   %1 = bitcast <8 x half> %0 to <8 x half>
-  store <8 x half> %1, <8 x half>* %dst
+  store <8 x half> %1, ptr %dst
   ret void
 }
 
@@ -420,12 +420,12 @@ entry:
 
 ; We can't prevent the (bitcast (load X)) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v4i32(<8 x half>* %src, <4 x i32>* %dst) nounwind {
+define void @v8f16_to_v4i32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x half>, <8 x half>* %src
+  %0 = load volatile <8 x half>, ptr %src
   %1 = bitcast <8 x half> %0 to <4 x i32>
   %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %1, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %dst
+  store <4 x i32> %2, ptr %dst
   ret void
 }
 
@@ -443,12 +443,12 @@ entry:
 
 ; We can't prevent the (bitcast (load X)) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v4f32(<8 x half>* %src, <4 x float>* %dst) nounwind {
+define void @v8f16_to_v4f32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x half>, <8 x half>* %src
+  %0 = load volatile <8 x half>, ptr %src
   %1 = bitcast <8 x half> %0 to <4 x float>
   %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %1, <4 x float> %1)
-  store <4 x float> %2, <4 x float>* %dst
+  store <4 x float> %2, ptr %dst
   ret void
 }
 
@@ -466,12 +466,12 @@ entry:
 
 ; We can't prevent the (bitcast (load X)) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v2i64(<8 x half>* %src, <2 x i64>* %dst) nounwind {
+define void @v8f16_to_v2i64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x half>, <8 x half>* %src
+  %0 = load volatile <8 x half>, ptr %src
   %1 = bitcast <8 x half> %0 to <2 x i64>
   %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %1, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %dst
+  store <2 x i64> %2, ptr %dst
   ret void
 }
 
@@ -489,12 +489,12 @@ entry:
 
 ; We can't prevent the (bitcast (load X)) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v8f16_to_v2f64(<8 x half>* %src, <2 x double>* %dst) nounwind {
+define void @v8f16_to_v2f64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <8 x half>, <8 x half>* %src
+  %0 = load volatile <8 x half>, ptr %src
   %1 = bitcast <8 x half> %0 to <2 x double>
   %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %1, <2 x double> %1)
-  store <2 x double> %2, <2 x double>* %dst
+  store <2 x double> %2, ptr %dst
   ret void
 }
 
@@ -511,13 +511,13 @@ entry:
 ; BIGENDIAN: .size v8f16_to_v2f64
 ;----
 
-define void @v4i32_to_v16i8(<4 x i32>* %src, <16 x i8>* %dst) nounwind {
+define void @v4i32_to_v16i8(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x i32>, <4 x i32>* %src
+  %0 = load volatile <4 x i32>, ptr %src
   %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
   %2 = bitcast <4 x i32> %1 to <16 x i8>
   %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* %dst
+  store <16 x i8> %3, ptr %dst
   ret void
 }
 
@@ -536,13 +536,13 @@ entry:
 ; BIGENDIAN: st.b [[R4]],
 ; BIGENDIAN: .size v4i32_to_v16i8
 
-define void @v4i32_to_v8i16(<4 x i32>* %src, <8 x i16>* %dst) nounwind {
+define void @v4i32_to_v8i16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x i32>, <4 x i32>* %src
+  %0 = load volatile <4 x i32>, ptr %src
   %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
   %2 = bitcast <4 x i32> %1 to <8 x i16>
   %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* %dst
+  store <8 x i16> %3, ptr %dst
   ret void
 }
 
@@ -563,12 +563,12 @@ entry:
 
 ; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v4i32_to_v8f16(<4 x i32>* %src, <8 x half>* %dst) nounwind {
+define void @v4i32_to_v8f16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x i32>, <4 x i32>* %src
+  %0 = load volatile <4 x i32>, ptr %src
   %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
   %2 = bitcast <4 x i32> %1 to <8 x half>
-  store <8 x half> %2, <8 x half>* %dst
+  store <8 x half> %2, ptr %dst
   ret void
 }
 
@@ -584,13 +584,13 @@ entry:
 ; BIGENDIAN: st.w [[R2]],
 ; BIGENDIAN: .size v4i32_to_v8f16
 
-define void @v4i32_to_v4i32(<4 x i32>* %src, <4 x i32>* %dst) nounwind {
+define void @v4i32_to_v4i32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x i32>, <4 x i32>* %src
+  %0 = load volatile <4 x i32>, ptr %src
   %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
   %2 = bitcast <4 x i32> %1 to <4 x i32>
   %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* %dst
+  store <4 x i32> %3, ptr %dst
   ret void
 }
 
@@ -608,13 +608,13 @@ entry:
 ; BIGENDIAN: st.w [[R3]],
 ; BIGENDIAN: .size v4i32_to_v4i32
 
-define void @v4i32_to_v4f32(<4 x i32>* %src, <4 x float>* %dst) nounwind {
+define void @v4i32_to_v4f32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x i32>, <4 x i32>* %src
+  %0 = load volatile <4 x i32>, ptr %src
   %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
   %2 = bitcast <4 x i32> %1 to <4 x float>
   %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
-  store <4 x float> %3, <4 x float>* %dst
+  store <4 x float> %3, ptr %dst
   ret void
 }
 
@@ -632,13 +632,13 @@ entry:
 ; BIGENDIAN: st.w [[R3]],
 ; BIGENDIAN: .size v4i32_to_v4f32
 
-define void @v4i32_to_v2i64(<4 x i32>* %src, <2 x i64>* %dst) nounwind {
+define void @v4i32_to_v2i64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x i32>, <4 x i32>* %src
+  %0 = load volatile <4 x i32>, ptr %src
   %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
   %2 = bitcast <4 x i32> %1 to <2 x i64>
   %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* %dst
+  store <2 x i64> %3, ptr %dst
   ret void
 }
 
@@ -657,13 +657,13 @@ entry:
 ; BIGENDIAN: st.d [[R4]],
 ; BIGENDIAN: .size v4i32_to_v2i64
 
-define void @v4i32_to_v2f64(<4 x i32>* %src, <2 x double>* %dst) nounwind {
+define void @v4i32_to_v2f64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x i32>, <4 x i32>* %src
+  %0 = load volatile <4 x i32>, ptr %src
   %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
   %2 = bitcast <4 x i32> %1 to <2 x double>
   %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
-  store <2 x double> %3, <2 x double>* %dst
+  store <2 x double> %3, ptr %dst
   ret void
 }
 
@@ -682,13 +682,13 @@ entry:
 ; BIGENDIAN: st.d [[R4]],
 ; BIGENDIAN: .size v4i32_to_v2f64
 
-define void @v4f32_to_v16i8(<4 x float>* %src, <16 x i8>* %dst) nounwind {
+define void @v4f32_to_v16i8(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* %src
+  %0 = load volatile <4 x float>, ptr %src
   %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
   %2 = bitcast <4 x float> %1 to <16 x i8>
   %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* %dst
+  store <16 x i8> %3, ptr %dst
   ret void
 }
 
@@ -707,13 +707,13 @@ entry:
 ; BIGENDIAN: st.b [[R4]],
 ; BIGENDIAN: .size v4f32_to_v16i8
 
-define void @v4f32_to_v8i16(<4 x float>* %src, <8 x i16>* %dst) nounwind {
+define void @v4f32_to_v8i16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* %src
+  %0 = load volatile <4 x float>, ptr %src
   %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
   %2 = bitcast <4 x float> %1 to <8 x i16>
   %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* %dst
+  store <8 x i16> %3, ptr %dst
   ret void
 }
 
@@ -734,12 +734,12 @@ entry:
 
 ; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v4f32_to_v8f16(<4 x float>* %src, <8 x half>* %dst) nounwind {
+define void @v4f32_to_v8f16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* %src
+  %0 = load volatile <4 x float>, ptr %src
   %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
   %2 = bitcast <4 x float> %1 to <8 x half>
-  store <8 x half> %2, <8 x half>* %dst
+  store <8 x half> %2, ptr %dst
   ret void
 }
 
@@ -755,13 +755,13 @@ entry:
 ; BIGENDIAN: st.w [[R2]],
 ; BIGENDIAN: .size v4f32_to_v8f16
 
-define void @v4f32_to_v4i32(<4 x float>* %src, <4 x i32>* %dst) nounwind {
+define void @v4f32_to_v4i32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* %src
+  %0 = load volatile <4 x float>, ptr %src
   %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
   %2 = bitcast <4 x float> %1 to <4 x i32>
   %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* %dst
+  store <4 x i32> %3, ptr %dst
   ret void
 }
 
@@ -779,13 +779,13 @@ entry:
 ; BIGENDIAN: st.w [[R3]],
 ; BIGENDIAN: .size v4f32_to_v4i32
 
-define void @v4f32_to_v4f32(<4 x float>* %src, <4 x float>* %dst) nounwind {
+define void @v4f32_to_v4f32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* %src
+  %0 = load volatile <4 x float>, ptr %src
   %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
   %2 = bitcast <4 x float> %1 to <4 x float>
   %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
-  store <4 x float> %3, <4 x float>* %dst
+  store <4 x float> %3, ptr %dst
   ret void
 }
 
@@ -803,13 +803,13 @@ entry:
 ; BIGENDIAN: st.w [[R3]],
 ; BIGENDIAN: .size v4f32_to_v4f32
 
-define void @v4f32_to_v2i64(<4 x float>* %src, <2 x i64>* %dst) nounwind {
+define void @v4f32_to_v2i64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* %src
+  %0 = load volatile <4 x float>, ptr %src
   %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
   %2 = bitcast <4 x float> %1 to <2 x i64>
   %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* %dst
+  store <2 x i64> %3, ptr %dst
   ret void
 }
 
@@ -828,13 +828,13 @@ entry:
 ; BIGENDIAN: st.d [[R4]],
 ; BIGENDIAN: .size v4f32_to_v2i64
 
-define void @v4f32_to_v2f64(<4 x float>* %src, <2 x double>* %dst) nounwind {
+define void @v4f32_to_v2f64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* %src
+  %0 = load volatile <4 x float>, ptr %src
   %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
   %2 = bitcast <4 x float> %1 to <2 x double>
   %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
-  store <2 x double> %3, <2 x double>* %dst
+  store <2 x double> %3, ptr %dst
   ret void
 }
 
@@ -853,13 +853,13 @@ entry:
 ; BIGENDIAN: st.d [[R4]],
 ; BIGENDIAN: .size v4f32_to_v2f64
 
-define void @v2i64_to_v16i8(<2 x i64>* %src, <16 x i8>* %dst) nounwind {
+define void @v2i64_to_v16i8(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x i64>, <2 x i64>* %src
+  %0 = load volatile <2 x i64>, ptr %src
   %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
   %2 = bitcast <2 x i64> %1 to <16 x i8>
   %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* %dst
+  store <16 x i8> %3, ptr %dst
   ret void
 }
 
@@ -879,13 +879,13 @@ entry:
 ; BIGENDIAN: st.b [[R4]],
 ; BIGENDIAN: .size v2i64_to_v16i8
 
-define void @v2i64_to_v8i16(<2 x i64>* %src, <8 x i16>* %dst) nounwind {
+define void @v2i64_to_v8i16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x i64>, <2 x i64>* %src
+  %0 = load volatile <2 x i64>, ptr %src
   %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
   %2 = bitcast <2 x i64> %1 to <8 x i16>
   %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* %dst
+  store <8 x i16> %3, ptr %dst
   ret void
 }
 
@@ -906,12 +906,12 @@ entry:
 
 ; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v2i64_to_v8f16(<2 x i64>* %src, <8 x half>* %dst) nounwind {
+define void @v2i64_to_v8f16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x i64>, <2 x i64>* %src
+  %0 = load volatile <2 x i64>, ptr %src
   %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
   %2 = bitcast <2 x i64> %1 to <8 x half>
-  store <8 x half> %2, <8 x half>* %dst
+  store <8 x half> %2, ptr %dst
   ret void
 }
 
@@ -927,13 +927,13 @@ entry:
 ; BIGENDIAN: st.d [[R2]],
 ; BIGENDIAN: .size v2i64_to_v8f16
 
-define void @v2i64_to_v4i32(<2 x i64>* %src, <4 x i32>* %dst) nounwind {
+define void @v2i64_to_v4i32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x i64>, <2 x i64>* %src
+  %0 = load volatile <2 x i64>, ptr %src
   %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
   %2 = bitcast <2 x i64> %1 to <4 x i32>
   %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* %dst
+  store <4 x i32> %3, ptr %dst
   ret void
 }
 
@@ -952,13 +952,13 @@ entry:
 ; BIGENDIAN: st.w [[R4]],
 ; BIGENDIAN: .size v2i64_to_v4i32
 
-define void @v2i64_to_v4f32(<2 x i64>* %src, <4 x float>* %dst) nounwind {
+define void @v2i64_to_v4f32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x i64>, <2 x i64>* %src
+  %0 = load volatile <2 x i64>, ptr %src
   %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
   %2 = bitcast <2 x i64> %1 to <4 x float>
   %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
-  store <4 x float> %3, <4 x float>* %dst
+  store <4 x float> %3, ptr %dst
   ret void
 }
 
@@ -977,13 +977,13 @@ entry:
 ; BIGENDIAN: st.w [[R4]],
 ; BIGENDIAN: .size v2i64_to_v4f32
 
-define void @v2i64_to_v2i64(<2 x i64>* %src, <2 x i64>* %dst) nounwind {
+define void @v2i64_to_v2i64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x i64>, <2 x i64>* %src
+  %0 = load volatile <2 x i64>, ptr %src
   %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
   %2 = bitcast <2 x i64> %1 to <2 x i64>
   %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* %dst
+  store <2 x i64> %3, ptr %dst
   ret void
 }
 
@@ -1001,13 +1001,13 @@ entry:
 ; BIGENDIAN: st.d [[R3]],
 ; BIGENDIAN: .size v2i64_to_v2i64
 
-define void @v2i64_to_v2f64(<2 x i64>* %src, <2 x double>* %dst) nounwind {
+define void @v2i64_to_v2f64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x i64>, <2 x i64>* %src
+  %0 = load volatile <2 x i64>, ptr %src
   %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
   %2 = bitcast <2 x i64> %1 to <2 x double>
   %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
-  store <2 x double> %3, <2 x double>* %dst
+  store <2 x double> %3, ptr %dst
   ret void
 }
 
@@ -1025,13 +1025,13 @@ entry:
 ; BIGENDIAN: st.d [[R3]],
 ; BIGENDIAN: .size v2i64_to_v2f64
 
-define void @v2f64_to_v16i8(<2 x double>* %src, <16 x i8>* %dst) nounwind {
+define void @v2f64_to_v16i8(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x double>, <2 x double>* %src
+  %0 = load volatile <2 x double>, ptr %src
   %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
   %2 = bitcast <2 x double> %1 to <16 x i8>
   %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
-  store <16 x i8> %3, <16 x i8>* %dst
+  store <16 x i8> %3, ptr %dst
   ret void
 }
 
@@ -1051,13 +1051,13 @@ entry:
 ; BIGENDIAN: st.b [[R4]],
 ; BIGENDIAN: .size v2f64_to_v16i8
 
-define void @v2f64_to_v8i16(<2 x double>* %src, <8 x i16>* %dst) nounwind {
+define void @v2f64_to_v8i16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x double>, <2 x double>* %src
+  %0 = load volatile <2 x double>, ptr %src
   %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
   %2 = bitcast <2 x double> %1 to <8 x i16>
   %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
-  store <8 x i16> %3, <8 x i16>* %dst
+  store <8 x i16> %3, ptr %dst
   ret void
 }
 
@@ -1078,12 +1078,12 @@ entry:
 
 ; We can't prevent the (store (bitcast X), Y) DAG Combine here because there
 ; are no operations for v8f16 to put in the way.
-define void @v2f64_to_v8f16(<2 x double>* %src, <8 x half>* %dst) nounwind {
+define void @v2f64_to_v8f16(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x double>, <2 x double>* %src
+  %0 = load volatile <2 x double>, ptr %src
   %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
   %2 = bitcast <2 x double> %1 to <8 x half>
-  store <8 x half> %2, <8 x half>* %dst
+  store <8 x half> %2, ptr %dst
   ret void
 }
 
@@ -1099,13 +1099,13 @@ entry:
 ; BIGENDIAN: st.d [[R2]],
 ; BIGENDIAN: .size v2f64_to_v8f16
 
-define void @v2f64_to_v4i32(<2 x double>* %src, <4 x i32>* %dst) nounwind {
+define void @v2f64_to_v4i32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x double>, <2 x double>* %src
+  %0 = load volatile <2 x double>, ptr %src
   %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
   %2 = bitcast <2 x double> %1 to <4 x i32>
   %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
-  store <4 x i32> %3, <4 x i32>* %dst
+  store <4 x i32> %3, ptr %dst
   ret void
 }
 
@@ -1124,13 +1124,13 @@ entry:
 ; BIGENDIAN: st.w [[R4]],
 ; BIGENDIAN: .size v2f64_to_v4i32
 
-define void @v2f64_to_v4f32(<2 x double>* %src, <4 x float>* %dst) nounwind {
+define void @v2f64_to_v4f32(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x double>, <2 x double>* %src
+  %0 = load volatile <2 x double>, ptr %src
   %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
   %2 = bitcast <2 x double> %1 to <4 x float>
   %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
-  store <4 x float> %3, <4 x float>* %dst
+  store <4 x float> %3, ptr %dst
   ret void
 }
 
@@ -1149,13 +1149,13 @@ entry:
 ; BIGENDIAN: st.w [[R4]],
 ; BIGENDIAN: .size v2f64_to_v4f32
 
-define void @v2f64_to_v2i64(<2 x double>* %src, <2 x i64>* %dst) nounwind {
+define void @v2f64_to_v2i64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x double>, <2 x double>* %src
+  %0 = load volatile <2 x double>, ptr %src
   %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
   %2 = bitcast <2 x double> %1 to <2 x i64>
   %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
-  store <2 x i64> %3, <2 x i64>* %dst
+  store <2 x i64> %3, ptr %dst
   ret void
 }
 
@@ -1173,13 +1173,13 @@ entry:
 ; BIGENDIAN: st.d [[R3]],
 ; BIGENDIAN: .size v2f64_to_v2i64
 
-define void @v2f64_to_v2f64(<2 x double>* %src, <2 x double>* %dst) nounwind {
+define void @v2f64_to_v2f64(ptr %src, ptr %dst) nounwind {
 entry:
-  %0 = load volatile <2 x double>, <2 x double>* %src
+  %0 = load volatile <2 x double>, ptr %src
   %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
   %2 = bitcast <2 x double> %1 to <2 x double>
   %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
-  store <2 x double> %3, <2 x double>* %dst
+  store <2 x double> %3, ptr %dst
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/bitwise.ll b/llvm/test/CodeGen/Mips/msa/bitwise.ll
index 4ff23a4c7d3d3..c7790b71b9636 100644
--- a/llvm/test/CodeGen/Mips/msa/bitwise.ll
+++ b/llvm/test/CodeGen/Mips/msa/bitwise.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=CHECK,MIPS
 ; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s --check-prefixes=CHECK,MIPSEL
 
-define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @and_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: and_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -10,14 +10,14 @@ define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    and.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = and <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @and_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: and_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -25,14 +25,14 @@ define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    and.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = and <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @and_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: and_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -40,14 +40,14 @@ define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    and.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = and <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @and_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: and_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -55,27 +55,27 @@ define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    and.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = and <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @and_v16i8_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: and_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    andi.b $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = and <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @and_v8i16_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: and_v8i16_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
@@ -83,13 +83,13 @@ define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK-NEXT:    and.v $w0, $w0, $w1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = and <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @and_v4i32_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: and_v4i32_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
@@ -97,13 +97,13 @@ define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK-NEXT:    and.v $w0, $w0, $w1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = and <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @and_v2i64_i(ptr %c, ptr %a) nounwind {
 ; MIPS-LABEL: and_v2i64_i:
 ; MIPS:       # %bb.0:
 ; MIPS-NEXT:    ldi.d $w0, 1
@@ -120,13 +120,13 @@ define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; MIPSEL-NEXT:    and.v $w0, $w1, $w0
 ; MIPSEL-NEXT:    jr $ra
 ; MIPSEL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = and <2 x i64> %1, <i64 1, i64 1>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @or_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: or_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -134,14 +134,14 @@ define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    or.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = or <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @or_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: or_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -149,14 +149,14 @@ define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    or.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = or <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @or_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: or_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -164,14 +164,14 @@ define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    or.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = or <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @or_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: or_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -179,27 +179,27 @@ define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    or.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = or <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @or_v16i8_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: or_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    ori.b $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = or <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @or_v8i16_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: or_v8i16_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
@@ -207,13 +207,13 @@ define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK-NEXT:    or.v $w0, $w0, $w1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = or <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @or_v4i32_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: or_v4i32_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
@@ -221,13 +221,13 @@ define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK-NEXT:    or.v $w0, $w0, $w1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = or <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @or_v2i64_i(ptr %c, ptr %a) nounwind {
 ; MIPS-LABEL: or_v2i64_i:
 ; MIPS:       # %bb.0:
 ; MIPS-NEXT:    ldi.d $w0, 3
@@ -244,13 +244,13 @@ define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; MIPSEL-NEXT:    or.v $w0, $w1, $w0
 ; MIPSEL-NEXT:    jr $ra
 ; MIPSEL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = or <2 x i64> %1, <i64 3, i64 3>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @nor_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: nor_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -258,15 +258,15 @@ define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    nor.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = or <16 x i8> %1, %2
   %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ret void
 }
 
-define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @nor_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: nor_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -274,15 +274,15 @@ define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    nor.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = or <8 x i16> %1, %2
   %4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ret void
 }
 
-define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @nor_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: nor_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -290,15 +290,15 @@ define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    nor.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = or <4 x i32> %1, %2
   %4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ret void
 }
 
-define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @nor_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: nor_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -306,29 +306,29 @@ define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    nor.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = or <2 x i64> %1, %2
   %4 = xor <2 x i64> %3, <i64 -1, i64 -1>
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ret void
 }
 
-define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @nor_v16i8_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: nor_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    nori.b $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = or <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @nor_v8i16_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: nor_v8i16_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
@@ -336,14 +336,14 @@ define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK-NEXT:    nor.v $w0, $w0, $w1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = or <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = xor <8 x i16> %2, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @nor_v4i32_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: nor_v4i32_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
@@ -351,14 +351,14 @@ define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK-NEXT:    nor.v $w0, $w0, $w1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = or <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = xor <4 x i32> %2, <i32 -1, i32 -1, i32 -1, i32 -1>
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @nor_v2i64_i(ptr %c, ptr %a) nounwind {
 ; MIPS-LABEL: nor_v2i64_i:
 ; MIPS:       # %bb.0:
 ; MIPS-NEXT:    ldi.d $w0, 1
@@ -375,14 +375,14 @@ define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; MIPSEL-NEXT:    nor.v $w0, $w1, $w0
 ; MIPSEL-NEXT:    jr $ra
 ; MIPSEL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = or <2 x i64> %1, <i64 1, i64 1>
   %3 = xor <2 x i64> %2, <i64 -1, i64 -1>
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @xor_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: xor_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -390,14 +390,14 @@ define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    xor.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = xor <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @xor_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: xor_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -405,14 +405,14 @@ define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    xor.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = xor <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @xor_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: xor_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -420,14 +420,14 @@ define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    xor.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = xor <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @xor_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: xor_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -435,27 +435,27 @@ define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    xor.v $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = xor <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @xor_v16i8_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: xor_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    xori.b $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = xor <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @xor_v8i16_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: xor_v8i16_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
@@ -463,13 +463,13 @@ define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK-NEXT:    xor.v $w0, $w0, $w1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = xor <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @xor_v4i32_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: xor_v4i32_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
@@ -477,13 +477,13 @@ define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK-NEXT:    xor.v $w0, $w0, $w1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = xor <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @xor_v2i64_i(ptr %c, ptr %a) nounwind {
 ; MIPS-LABEL: xor_v2i64_i:
 ; MIPS:       # %bb.0:
 ; MIPS-NEXT:    ldi.d $w0, 3
@@ -500,13 +500,13 @@ define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; MIPSEL-NEXT:    xor.v $w0, $w1, $w0
 ; MIPSEL-NEXT:    jr $ra
 ; MIPSEL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = xor <2 x i64> %1, <i64 3, i64 3>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @sll_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: sll_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -514,14 +514,14 @@ define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    sll.b $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = shl <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @sll_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: sll_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -529,14 +529,14 @@ define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    sll.h $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = shl <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @sll_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: sll_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -544,14 +544,14 @@ define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    sll.w $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = shl <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @sll_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: sll_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -559,66 +559,66 @@ define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    sll.d $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = shl <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @sll_v16i8_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: sll_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    slli.b $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = shl <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @sll_v8i16_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: sll_v8i16_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
 ; CHECK-NEXT:    slli.h $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = shl <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @sll_v4i32_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: sll_v4i32_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
 ; CHECK-NEXT:    slli.w $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = shl <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @sll_v2i64_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: sll_v2i64_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
 ; CHECK-NEXT:    slli.d $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = shl <2 x i64> %1, <i64 1, i64 1>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @sra_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: sra_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -626,14 +626,14 @@ define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    sra.b $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = ashr <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @sra_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: sra_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -641,14 +641,14 @@ define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    sra.h $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = ashr <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @sra_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: sra_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -656,14 +656,14 @@ define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    sra.w $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = ashr <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @sra_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: sra_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -671,66 +671,66 @@ define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    sra.d $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = ashr <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @sra_v16i8_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: sra_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    srai.b $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = ashr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @sra_v8i16_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: sra_v8i16_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
 ; CHECK-NEXT:    srai.h $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = ashr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @sra_v4i32_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: sra_v4i32_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
 ; CHECK-NEXT:    srai.w $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = ashr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @sra_v2i64_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: sra_v2i64_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
 ; CHECK-NEXT:    srai.d $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = ashr <2 x i64> %1, <i64 1, i64 1>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @srl_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: srl_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -738,14 +738,14 @@ define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    srl.b $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = lshr <16 x i8> %1, %2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @srl_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: srl_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -753,14 +753,14 @@ define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    srl.h $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = lshr <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @srl_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: srl_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -768,14 +768,14 @@ define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    srl.w $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = lshr <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @srl_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: srl_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -783,170 +783,170 @@ define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    srl.d $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = lshr <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @srl_v16i8_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: srl_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    srli.b $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = lshr <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @srl_v8i16_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: srl_v8i16_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
 ; CHECK-NEXT:    srli.h $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = lshr <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @srl_v4i32_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: srl_v4i32_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
 ; CHECK-NEXT:    srli.w $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = lshr <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @srl_v2i64_i(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: srl_v2i64_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
 ; CHECK-NEXT:    srli.d $w0, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = lshr <2 x i64> %1, <i64 1, i64 1>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @ctpop_v16i8(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: ctpop_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    pcnt.b $w0, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = tail call <16 x i8> @llvm.ctpop.v16i8 (<16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @ctpop_v8i16(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: ctpop_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
 ; CHECK-NEXT:    pcnt.h $w0, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = tail call <8 x i16> @llvm.ctpop.v8i16 (<8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @ctpop_v4i32(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: ctpop_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
 ; CHECK-NEXT:    pcnt.w $w0, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = tail call <4 x i32> @llvm.ctpop.v4i32 (<4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @ctpop_v2i64(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: ctpop_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
 ; CHECK-NEXT:    pcnt.d $w0, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = tail call <2 x i64> @llvm.ctpop.v2i64 (<2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @ctlz_v16i8(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: ctlz_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    nlzc.b $w0, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = tail call <16 x i8> @llvm.ctlz.v16i8 (<16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @ctlz_v8i16(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: ctlz_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
 ; CHECK-NEXT:    nlzc.h $w0, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = tail call <8 x i16> @llvm.ctlz.v8i16 (<8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @ctlz_v4i32(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: ctlz_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
 ; CHECK-NEXT:    nlzc.w $w0, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = tail call <4 x i32> @llvm.ctlz.v4i32 (<4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @ctlz_v2i64(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: ctlz_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
 ; CHECK-NEXT:    nlzc.d $w0, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = tail call <2 x i64> @llvm.ctlz.v2i64 (<2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %m) nounwind {
+define void @bsel_v16i8(ptr %c, ptr %a, ptr %b, ptr %m) nounwind {
 ; CHECK-LABEL: bsel_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($7)
@@ -955,9 +955,9 @@ define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>*
 ; CHECK-NEXT:    bmnz.v $w2, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w2, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
-  %3 = load <16 x i8>, <16 x i8>* %m
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
+  %3 = load <16 x i8>, ptr %m
   %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1,
                           i8 -1, i8 -1, i8 -1, i8 -1,
                           i8 -1, i8 -1, i8 -1, i8 -1,
@@ -967,11 +967,11 @@ define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>*
   %7 = or <16 x i8> %5, %6
   ; bmnz is the same operation
   ; (vselect Mask, IfSet, IfClr) -> (BMNZ IfClr, IfSet, Mask)
-  store <16 x i8> %7, <16 x i8>* %c
+  store <16 x i8> %7, ptr %c
   ret void
 }
 
-define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind {
+define void @bsel_v16i8_i(ptr %c, ptr %a, ptr %m) nounwind {
 ; CHECK-LABEL: bsel_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
@@ -979,8 +979,8 @@ define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind
 ; CHECK-NEXT:    bseli.b $w1, $w0, 6
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w1, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %m
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %m
   %3 = xor <16 x i8> %2, <i8 -1, i8 -1, i8 -1, i8 -1,
                           i8 -1, i8 -1, i8 -1, i8 -1,
                           i8 -1, i8 -1, i8 -1, i8 -1,
@@ -991,11 +991,11 @@ define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind
                       i8 6, i8 6, i8 6, i8 6,
                       i8 6, i8 6, i8 6, i8 6>, %2
   %6 = or <16 x i8> %4, %5
-  store <16 x i8> %6, <16 x i8>* %c
+  store <16 x i8> %6, ptr %c
   ret void
 }
 
-define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @bsel_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bsel_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
@@ -1004,18 +1004,18 @@ define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    bsel.v $w2, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w2, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = and <8 x i16> %1, <i16 6, i16 6, i16 6, i16 6,
                           i16 6, i16 6, i16 6, i16 6>
   %4 = and <8 x i16> %2, <i16 65529, i16 65529, i16 65529, i16 65529,
                           i16 65529, i16 65529, i16 65529, i16 65529>
   %5 = or <8 x i16> %3, %4
-  store <8 x i16> %5, <8 x i16>* %c
+  store <8 x i16> %5, ptr %c
   ret void
 }
 
-define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @bsel_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bsel_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
@@ -1024,16 +1024,16 @@ define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    bsel.v $w2, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w2, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = and <4 x i32> %1, <i32 6, i32 6, i32 6, i32 6>
   %4 = and <4 x i32> %2, <i32 4294967289, i32 4294967289, i32 4294967289, i32 4294967289>
   %5 = or <4 x i32> %3, %4
-  store <4 x i32> %5, <4 x i32>* %c
+  store <4 x i32> %5, ptr %c
   ret void
 }
 
-define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @bsel_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; MIPS-LABEL: bsel_v2i64:
 ; MIPS:       # %bb.0:
 ; MIPS-NEXT:    ldi.d $w0, 6
@@ -1052,16 +1052,16 @@ define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; MIPSEL-NEXT:    bsel.v $w0, $w2, $w1
 ; MIPSEL-NEXT:    jr $ra
 ; MIPSEL-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = and <2 x i64> %1, <i64 6, i64 6>
   %4 = and <2 x i64> %2, <i64 18446744073709551609, i64 18446744073709551609>
   %5 = or <2 x i64> %3, %4
-  store <2 x i64> %5, <2 x i64>* %c
+  store <2 x i64> %5, ptr %c
   ret void
 }
 
-define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @binsl_v16i8_i(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: binsl_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
@@ -1069,8 +1069,8 @@ define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
 ; CHECK-NEXT:    binsli.b $w1, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w1, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = and <16 x i8> %1, <i8 192, i8 192, i8 192, i8 192,
                           i8 192, i8 192, i8 192, i8 192,
                           i8 192, i8 192, i8 192, i8 192,
@@ -1080,11 +1080,11 @@ define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
                           i8 63, i8 63, i8 63, i8 63,
                           i8 63, i8 63, i8 63, i8 63>
   %5 = or <16 x i8> %3, %4
-  store <16 x i8> %5, <16 x i8>* %c
+  store <16 x i8> %5, ptr %c
   ret void
 }
 
-define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @binsl_v8i16_i(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: binsl_v8i16_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
@@ -1092,18 +1092,18 @@ define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
 ; CHECK-NEXT:    binsli.h $w1, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w1, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = and <8 x i16> %1, <i16 49152, i16 49152, i16 49152, i16 49152,
                           i16 49152, i16 49152, i16 49152, i16 49152>
   %4 = and <8 x i16> %2, <i16 16383, i16 16383, i16 16383, i16 16383,
                           i16 16383, i16 16383, i16 16383, i16 16383>
   %5 = or <8 x i16> %3, %4
-  store <8 x i16> %5, <8 x i16>* %c
+  store <8 x i16> %5, ptr %c
   ret void
 }
 
-define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @binsl_v4i32_i(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: binsl_v4i32_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
@@ -1111,16 +1111,16 @@ define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
 ; CHECK-NEXT:    binsli.w $w1, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w1, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = and <4 x i32> %1, <i32 3221225472, i32 3221225472, i32 3221225472, i32 3221225472>
   %4 = and <4 x i32> %2, <i32 1073741823, i32 1073741823, i32 1073741823, i32 1073741823>
   %5 = or <4 x i32> %3, %4
-  store <4 x i32> %5, <4 x i32>* %c
+  store <4 x i32> %5, ptr %c
   ret void
 }
 
-define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @binsl_v2i64_i(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: binsl_v2i64_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
@@ -1128,8 +1128,8 @@ define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
 ; CHECK-NEXT:    binsli.d $w1, $w0, 60
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w1, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = and <2 x i64> %1, <i64 18446744073709551608, i64 18446744073709551608>
   %4 = and <2 x i64> %2, <i64 7, i64 7>
   %5 = or <2 x i64> %3, %4
@@ -1137,11 +1137,11 @@ define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
   ;       issue. If the mask doesn't fit within a 10-bit immediate, it gets
   ;       legalized into a constant pool. We should add a test to cover the
   ;       other cases once they correctly select binsli.d.
-  store <2 x i64> %5, <2 x i64>* %c
+  store <2 x i64> %5, ptr %c
   ret void
 }
 
-define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @binsr_v16i8_i(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: binsr_v16i8_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
@@ -1149,8 +1149,8 @@ define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
 ; CHECK-NEXT:    binsri.b $w1, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w1, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = and <16 x i8> %1, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3,
                           i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   %4 = and <16 x i8> %2, <i8 252, i8 252, i8 252, i8 252,
@@ -1158,11 +1158,11 @@ define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
                           i8 252, i8 252, i8 252, i8 252,
                           i8 252, i8 252, i8 252, i8 252>
   %5 = or <16 x i8> %3, %4
-  store <16 x i8> %5, <16 x i8>* %c
+  store <16 x i8> %5, ptr %c
   ret void
 }
 
-define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @binsr_v8i16_i(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: binsr_v8i16_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
@@ -1170,18 +1170,18 @@ define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
 ; CHECK-NEXT:    binsri.h $w1, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w1, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = and <8 x i16> %1, <i16 3, i16 3, i16 3, i16 3,
                           i16 3, i16 3, i16 3, i16 3>
   %4 = and <8 x i16> %2, <i16 65532, i16 65532, i16 65532, i16 65532,
                           i16 65532, i16 65532, i16 65532, i16 65532>
   %5 = or <8 x i16> %3, %4
-  store <8 x i16> %5, <8 x i16>* %c
+  store <8 x i16> %5, ptr %c
   ret void
 }
 
-define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @binsr_v4i32_i(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: binsr_v4i32_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
@@ -1189,16 +1189,16 @@ define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
 ; CHECK-NEXT:    binsri.w $w1, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w1, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = and <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
   %4 = and <4 x i32> %2, <i32 4294967292, i32 4294967292, i32 4294967292, i32 4294967292>
   %5 = or <4 x i32> %3, %4
-  store <4 x i32> %5, <4 x i32>* %c
+  store <4 x i32> %5, ptr %c
   ret void
 }
 
-define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @binsr_v2i64_i(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: binsr_v2i64_i:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
@@ -1206,16 +1206,16 @@ define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
 ; CHECK-NEXT:    binsri.d $w1, $w0, 1
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w1, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = and <2 x i64> %1, <i64 3, i64 3>
   %4 = and <2 x i64> %2, <i64 18446744073709551612, i64 18446744073709551612>
   %5 = or <2 x i64> %3, %4
-  store <2 x i64> %5, <2 x i64>* %c
+  store <2 x i64> %5, ptr %c
   ret void
 }
 
-define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @bclr_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bclr_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -1223,16 +1223,16 @@ define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    bclr.b $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
   %4 = xor <16 x i8> %3, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %5 = and <16 x i8> %1, %4
-  store <16 x i8> %5, <16 x i8>* %c
+  store <16 x i8> %5, ptr %c
   ret void
 }
 
-define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @bclr_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bclr_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -1240,16 +1240,16 @@ define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    bclr.h $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
   %4 = xor <8 x i16> %3, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %5 = and <8 x i16> %1, %4
-  store <8 x i16> %5, <8 x i16>* %c
+  store <8 x i16> %5, ptr %c
   ret void
 }
 
-define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @bclr_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bclr_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -1257,16 +1257,16 @@ define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    bclr.w $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
   %4 = xor <4 x i32> %3, <i32 -1, i32 -1, i32 -1, i32 -1>
   %5 = and <4 x i32> %1, %4
-  store <4 x i32> %5, <4 x i32>* %c
+  store <4 x i32> %5, ptr %c
   ret void
 }
 
-define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @bclr_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bclr_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -1274,16 +1274,16 @@ define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    bclr.d $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = shl <2 x i64> <i64 1, i64 1>, %2
   %4 = xor <2 x i64> %3, <i64 -1, i64 -1>
   %5 = and <2 x i64> %1, %4
-  store <2 x i64> %5, <2 x i64>* %c
+  store <2 x i64> %5, ptr %c
   ret void
 }
 
-define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @bset_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bset_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -1291,15 +1291,15 @@ define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    bset.b $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
   %4 = or <16 x i8> %1, %3
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ret void
 }
 
-define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @bset_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bset_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -1307,15 +1307,15 @@ define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    bset.h $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
   %4 = or <8 x i16> %1, %3
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ret void
 }
 
-define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @bset_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bset_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -1323,15 +1323,15 @@ define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    bset.w $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
   %4 = or <4 x i32> %1, %3
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ret void
 }
 
-define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @bset_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bset_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -1339,15 +1339,15 @@ define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    bset.d $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = shl <2 x i64> <i64 1, i64 1>, %2
   %4 = or <2 x i64> %1, %3
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ret void
 }
 
-define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @bneg_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bneg_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($6)
@@ -1355,15 +1355,15 @@ define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK-NEXT:    bneg.b $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   %3 = shl <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, %2
   %4 = xor <16 x i8> %1, %3
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ret void
 }
 
-define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @bneg_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bneg_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($6)
@@ -1371,15 +1371,15 @@ define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK-NEXT:    bneg.h $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   %3 = shl <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, %2
   %4 = xor <8 x i16> %1, %3
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ret void
 }
 
-define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @bneg_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bneg_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($6)
@@ -1387,15 +1387,15 @@ define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK-NEXT:    bneg.w $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   %3 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %2
   %4 = xor <4 x i32> %1, %3
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ret void
 }
 
-define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @bneg_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
 ; CHECK-LABEL: bneg_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($6)
@@ -1403,176 +1403,176 @@ define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK-NEXT:    bneg.d $w0, $w1, $w0
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   %3 = shl <2 x i64> <i64 1, i64 1>, %2
   %4 = xor <2 x i64> %1, %3
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ret void
 }
 
-define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @bclri_v16i8(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bclri_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    andi.b $w0, $w0, 247
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = xor <16 x i8> <i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8, i8  8>,
                      <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %3 = and <16 x i8> %1, %2
   ; bclri.b and andi.b are exactly equivalent.
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ret void
 }
 
-define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @bclri_v8i16(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bclri_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
 ; CHECK-NEXT:    bclri.h $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = xor <8 x i16> <i16  8, i16  8, i16  8, i16  8, i16  8, i16  8, i16  8, i16  8>,
                      <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %3 = and <8 x i16> %1, %2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ret void
 }
 
-define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @bclri_v4i32(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bclri_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
 ; CHECK-NEXT:    bclri.w $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = xor <4 x i32> <i32  8, i32  8, i32  8, i32  8>,
                      <i32 -1, i32 -1, i32 -1, i32 -1>
   %3 = and <4 x i32> %1, %2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ret void
 }
 
-define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @bclri_v2i64(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bclri_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
 ; CHECK-NEXT:    bclri.d $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = xor <2 x i64> <i64  8, i64  8>,
                      <i64 -1, i64 -1>
   %3 = and <2 x i64> %1, %2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ret void
 }
 
-define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @bseti_v16i8(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bseti_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    bseti.b $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = or <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @bseti_v8i16(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bseti_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
 ; CHECK-NEXT:    bseti.h $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = or <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @bseti_v4i32(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bseti_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
 ; CHECK-NEXT:    bseti.w $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = or <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @bseti_v2i64(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bseti_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
 ; CHECK-NEXT:    bseti.d $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = or <2 x i64> %1, <i64 8, i64 8>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 
-define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @bnegi_v16i8(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bnegi_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.b $w0, 0($5)
 ; CHECK-NEXT:    bnegi.b $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($4)
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   %2 = xor <16 x i8> %1, <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ret void
 }
 
-define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @bnegi_v8i16(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bnegi_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.h $w0, 0($5)
 ; CHECK-NEXT:    bnegi.h $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($4)
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   %2 = xor <8 x i16> %1, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ret void
 }
 
-define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @bnegi_v4i32(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bnegi_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.w $w0, 0($5)
 ; CHECK-NEXT:    bnegi.w $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($4)
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   %2 = xor <4 x i32> %1, <i32 8, i32 8, i32 8, i32 8>
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ret void
 }
 
-define void @bnegi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @bnegi_v2i64(ptr %c, ptr %a) nounwind {
 ; CHECK-LABEL: bnegi_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ld.d $w0, 0($5)
 ; CHECK-NEXT:    bnegi.d $w0, $w0, 3
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($4)
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   %2 = xor <2 x i64> %1, <i64 8, i64 8>
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/bmzi_bmnzi.ll b/llvm/test/CodeGen/Mips/msa/bmzi_bmnzi.ll
index 6e1f720967025..ab2c36eae67da 100644
--- a/llvm/test/CodeGen/Mips/msa/bmzi_bmnzi.ll
+++ b/llvm/test/CodeGen/Mips/msa/bmzi_bmnzi.ll
@@ -6,14 +6,14 @@
 
 define void @llvm_mips_bmnzi_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 240)
-  store volatile <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  store volatile <16 x i8> %2, ptr @llvm_mips_bmnzi_b_RES
   %3 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 15)
-  store volatile <16 x i8> %3, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  store volatile <16 x i8> %3, ptr @llvm_mips_bmnzi_b_RES
   %4 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 170)
-  store <16 x i8> %4, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  store <16 x i8> %4, ptr @llvm_mips_bmnzi_b_RES
   ret void
 }
 ; CHECK-LABEL: llvm_mips_bmnzi_b_test:
@@ -29,14 +29,14 @@ entry:
 
 define void @llvm_mips_bmzi_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 240)
-  store volatile <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  store volatile <16 x i8> %2, ptr @llvm_mips_bmnzi_b_RES
   %3 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 15)
-  store volatile <16 x i8> %3, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  store volatile <16 x i8> %3, ptr @llvm_mips_bmnzi_b_RES
   %4 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 170)
-  store <16 x i8> %4, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  store <16 x i8> %4, ptr @llvm_mips_bmnzi_b_RES
   ret void
 }
 ; CHECK-LABEL: llvm_mips_bmzi_b_test:

diff --git a/llvm/test/CodeGen/Mips/msa/compare.ll b/llvm/test/CodeGen/Mips/msa/compare.ll
index b977506832170..a3910bde8cd1c 100644
--- a/llvm/test/CodeGen/Mips/msa/compare.ll
+++ b/llvm/test/CodeGen/Mips/msa/compare.ll
@@ -1,340 +1,340 @@
 ; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 ; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 
-define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ceq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ceq_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp eq <16 x i8> %1, %2
   %4 = sext <16 x i1> %3 to <16 x i8>
   ; CHECK-DAG: ceq.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ceq_v16i8
 }
 
-define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ceq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ceq_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp eq <8 x i16> %1, %2
   %4 = sext <8 x i1> %3 to <8 x i16>
   ; CHECK-DAG: ceq.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ceq_v8i16
 }
 
-define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ceq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ceq_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp eq <4 x i32> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: ceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ceq_v4i32
 }
 
-define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ceq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ceq_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp eq <2 x i64> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: ceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ceq_v2i64
 }
 
-define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @cle_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cle_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <16 x i8> %1, %2
   %4 = sext <16 x i1> %3 to <16 x i8>
   ; CHECK-DAG: cle_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size cle_s_v16i8
 }
 
-define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @cle_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cle_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <8 x i16> %1, %2
   %4 = sext <8 x i1> %3 to <8 x i16>
   ; CHECK-DAG: cle_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size cle_s_v8i16
 }
 
-define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @cle_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cle_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <4 x i32> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: cle_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size cle_s_v4i32
 }
 
-define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @cle_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cle_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <2 x i64> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: cle_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size cle_s_v2i64
 }
 
-define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @cle_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cle_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <16 x i8> %1, %2
   %4 = sext <16 x i1> %3 to <16 x i8>
   ; CHECK-DAG: cle_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size cle_u_v16i8
 }
 
-define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @cle_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cle_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <8 x i16> %1, %2
   %4 = sext <8 x i1> %3 to <8 x i16>
   ; CHECK-DAG: cle_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size cle_u_v8i16
 }
 
-define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @cle_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cle_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <4 x i32> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: cle_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size cle_u_v4i32
 }
 
-define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @cle_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cle_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <2 x i64> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: cle_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size cle_u_v2i64
 }
 
-define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @clt_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: clt_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <16 x i8> %1, %2
   %4 = sext <16 x i1> %3 to <16 x i8>
   ; CHECK-DAG: clt_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clt_s_v16i8
 }
 
-define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @clt_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: clt_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <8 x i16> %1, %2
   %4 = sext <8 x i1> %3 to <8 x i16>
   ; CHECK-DAG: clt_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clt_s_v8i16
 }
 
-define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @clt_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: clt_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <4 x i32> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: clt_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clt_s_v4i32
 }
 
-define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @clt_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: clt_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <2 x i64> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: clt_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clt_s_v2i64
 }
 
-define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @clt_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: clt_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <16 x i8> %1, %2
   %4 = sext <16 x i1> %3 to <16 x i8>
   ; CHECK-DAG: clt_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clt_u_v16i8
 }
 
-define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @clt_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: clt_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <8 x i16> %1, %2
   %4 = sext <8 x i1> %3 to <8 x i16>
   ; CHECK-DAG: clt_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clt_u_v8i16
 }
 
-define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @clt_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: clt_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <4 x i32> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: clt_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clt_u_v4i32
 }
 
-define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @clt_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: clt_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <2 x i64> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: clt_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
@@ -343,17 +343,17 @@ define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 
 ; There is no != comparison, but test it anyway since we've had legalizer
 ; issues in this area.
-define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @cne_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cne_v16i8:
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ne <16 x i8> %1, %2
   %4 = sext <16 x i1> %3 to <16 x i8>
   ; CHECK-DAG: ceq.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
   ; CHECK-DAG: xori.b [[R3]], [[R3]], 255
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
@@ -362,12 +362,12 @@ define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 
 ; There is no != comparison, but test it anyway since we've had legalizer
 ; issues in this area.
-define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @cne_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cne_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ne <8 x i16> %1, %2
   %4 = sext <8 x i1> %3 to <8 x i16>
@@ -375,7 +375,7 @@ define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
   ; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but that's an optimisation issue
   ; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
   ; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
@@ -384,12 +384,12 @@ define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 
 ; There is no != comparison, but test it anyway since we've had legalizer
 ; issues in this area.
-define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @cne_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cne_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ne <4 x i32> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
@@ -397,7 +397,7 @@ define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
   ; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but that's an optimisation issue
   ; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
   ; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
@@ -406,12 +406,12 @@ define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 
 ; There is no != comparison, but test it anyway since we've had legalizer
 ; issues in this area.
-define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @cne_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: cne_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ne <2 x i64> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
@@ -419,1665 +419,1665 @@ define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ; TODO: This should be an 'xori.b [[R3]], [[R3]], 255' but that's an optimisation issue
   ; CHECK-DAG: ldi.b [[R4:\$w[0-9]+]], -1
   ; CHECK-DAG: xor.v [[R3]], [[R3]], [[R4]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size cne_v2i64
 }
 
-define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @ceqi_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: ceqi_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp eq <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = sext <16 x i1> %2 to <16 x i8>
   ; CHECK-DAG: ceqi.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ceqi_v16i8
 }
 
-define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @ceqi_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: ceqi_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp eq <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = sext <8 x i1> %2 to <8 x i16>
   ; CHECK-DAG: ceqi.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ceqi_v8i16
 }
 
-define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @ceqi_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: ceqi_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp eq <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = sext <4 x i1> %2 to <4 x i32>
   ; CHECK-DAG: ceqi.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ceqi_v4i32
 }
 
-define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @ceqi_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: ceqi_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp eq <2 x i64> %1, <i64 1, i64 1>
   %3 = sext <2 x i1> %2 to <2 x i64>
   ; CHECK-DAG: ceqi.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ceqi_v2i64
 }
 
-define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @clei_s_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: clei_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = sext <16 x i1> %2 to <16 x i8>
   ; CHECK-DAG: clei_s.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clei_s_v16i8
 }
 
-define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @clei_s_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: clei_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = sext <8 x i1> %2 to <8 x i16>
   ; CHECK-DAG: clei_s.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clei_s_v8i16
 }
 
-define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @clei_s_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: clei_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = sext <4 x i1> %2 to <4 x i32>
   ; CHECK-DAG: clei_s.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clei_s_v4i32
 }
 
-define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @clei_s_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: clei_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
   %3 = sext <2 x i1> %2 to <2 x i64>
   ; CHECK-DAG: clei_s.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clei_s_v2i64
 }
 
-define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @clei_u_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: clei_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = sext <16 x i1> %2 to <16 x i8>
   ; CHECK-DAG: clei_u.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clei_u_v16i8
 }
 
-define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @clei_u_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: clei_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = sext <8 x i1> %2 to <8 x i16>
   ; CHECK-DAG: clei_u.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clei_u_v8i16
 }
 
-define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @clei_u_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: clei_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = sext <4 x i1> %2 to <4 x i32>
   ; CHECK-DAG: clei_u.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clei_u_v4i32
 }
 
-define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @clei_u_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: clei_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
   %3 = sext <2 x i1> %2 to <2 x i64>
   ; CHECK-DAG: clei_u.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clei_u_v2i64
 }
 
-define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @clti_s_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: clti_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = sext <16 x i1> %2 to <16 x i8>
   ; CHECK-DAG: clti_s.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clti_s_v16i8
 }
 
-define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @clti_s_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: clti_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = sext <8 x i1> %2 to <8 x i16>
   ; CHECK-DAG: clti_s.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clti_s_v8i16
 }
 
-define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @clti_s_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: clti_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = sext <4 x i1> %2 to <4 x i32>
   ; CHECK-DAG: clti_s.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clti_s_v4i32
 }
 
-define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @clti_s_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: clti_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
   %3 = sext <2 x i1> %2 to <2 x i64>
   ; CHECK-DAG: clti_s.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clti_s_v2i64
 }
 
-define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @clti_u_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: clti_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <16 x i8> %1, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
   %3 = sext <16 x i1> %2 to <16 x i8>
   ; CHECK-DAG: clti_u.b [[R3:\$w[0-9]+]], [[R1]], 2
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clti_u_v16i8
 }
 
-define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @clti_u_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: clti_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <8 x i16> %1, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
   %3 = sext <8 x i1> %2 to <8 x i16>
   ; CHECK-DAG: clti_u.h [[R3:\$w[0-9]+]], [[R1]], 2
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clti_u_v8i16
 }
 
-define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @clti_u_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: clti_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
   %3 = sext <4 x i1> %2 to <4 x i32>
   ; CHECK-DAG: clti_u.w [[R3:\$w[0-9]+]], [[R1]], 2
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clti_u_v4i32
 }
 
-define void @clti_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @clti_u_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: clti_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <2 x i64> %1, <i64 2, i64 2>
   %3 = sext <2 x i1> %2 to <2 x i64>
   ; CHECK-DAG: clti_u.d [[R3:\$w[0-9]+]], [[R1]], 2
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size clti_u_v2i64
 }
 
-define void @bsel_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
-                        <16 x i8>* %c) nounwind {
+define void @bsel_s_v16i8(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bsel_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <16 x i8>, <16 x i8>* %c
+  %3 = load <16 x i8>, ptr %c
   ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
   %4 = icmp sgt <16 x i8> %1, %2
   ; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <16 x i1> %4, <16 x i8> %1, <16 x i8> %3
   ; bmnz.v is the same operation
   ; CHECK-DAG: bmnz.v [[R3]], [[R1]], [[R4]]
-  store <16 x i8> %5, <16 x i8>* %d
+  store <16 x i8> %5, ptr %d
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size bsel_s_v16i8
 }
 
-define void @bsel_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
-                        <8 x i16>* %c) nounwind {
+define void @bsel_s_v8i16(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bsel_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <8 x i16>, <8 x i16>* %c
+  %3 = load <8 x i16>, ptr %c
   ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
   %4 = icmp sgt <8 x i16> %1, %2
   ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
-  store <8 x i16> %5, <8 x i16>* %d
+  store <8 x i16> %5, ptr %d
   ; CHECK-DAG: st.h [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bsel_s_v8i16
 }
 
-define void @bsel_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
-                        <4 x i32>* %c) nounwind {
+define void @bsel_s_v4i32(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bsel_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <4 x i32>, <4 x i32>* %c
+  %3 = load <4 x i32>, ptr %c
   ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
   %4 = icmp sgt <4 x i32> %1, %2
   ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
-  store <4 x i32> %5, <4 x i32>* %d
+  store <4 x i32> %5, ptr %d
   ; CHECK-DAG: st.w [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bsel_s_v4i32
 }
 
-define void @bsel_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
-                        <2 x i64>* %c) nounwind {
+define void @bsel_s_v2i64(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bsel_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <2 x i64>, <2 x i64>* %c
+  %3 = load <2 x i64>, ptr %c
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
   %4 = icmp sgt <2 x i64> %1, %2
   ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
-  store <2 x i64> %5, <2 x i64>* %d
+  store <2 x i64> %5, ptr %d
   ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bsel_s_v2i64
 }
 
-define void @bsel_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
-                        <16 x i8>* %c) nounwind {
+define void @bsel_u_v16i8(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bsel_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <16 x i8>, <16 x i8>* %c
+  %3 = load <16 x i8>, ptr %c
   ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
   %4 = icmp ugt <16 x i8> %1, %2
   ; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <16 x i1> %4, <16 x i8> %1, <16 x i8> %3
   ; bmnz.v is the same operation
   ; CHECK-DAG: bmnz.v [[R3]], [[R1]], [[R4]]
-  store <16 x i8> %5, <16 x i8>* %d
+  store <16 x i8> %5, ptr %d
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size bsel_u_v16i8
 }
 
-define void @bsel_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
-                        <8 x i16>* %c) nounwind {
+define void @bsel_u_v8i16(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bsel_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <8 x i16>, <8 x i16>* %c
+  %3 = load <8 x i16>, ptr %c
   ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
   %4 = icmp ugt <8 x i16> %1, %2
   ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <8 x i1> %4, <8 x i16> %1, <8 x i16> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
-  store <8 x i16> %5, <8 x i16>* %d
+  store <8 x i16> %5, ptr %d
   ; CHECK-DAG: st.h [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bsel_u_v8i16
 }
 
-define void @bsel_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
-                        <4 x i32>* %c) nounwind {
+define void @bsel_u_v4i32(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bsel_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <4 x i32>, <4 x i32>* %c
+  %3 = load <4 x i32>, ptr %c
   ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
   %4 = icmp ugt <4 x i32> %1, %2
   ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <4 x i1> %4, <4 x i32> %1, <4 x i32> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
-  store <4 x i32> %5, <4 x i32>* %d
+  store <4 x i32> %5, ptr %d
   ; CHECK-DAG: st.w [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bsel_u_v4i32
 }
 
-define void @bsel_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
-                        <2 x i64>* %c) nounwind {
+define void @bsel_u_v2i64(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bsel_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <2 x i64>, <2 x i64>* %c
+  %3 = load <2 x i64>, ptr %c
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
   %4 = icmp ugt <2 x i64> %1, %2
   ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <2 x i1> %4, <2 x i64> %1, <2 x i64> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
-  store <2 x i64> %5, <2 x i64>* %d
+  store <2 x i64> %5, ptr %d
   ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bsel_u_v2i64
 }
 
-define void @bseli_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
-                        <16 x i8>* %c) nounwind {
+define void @bseli_s_v16i8(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bseli_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sgt <16 x i8> %1, %2
   ; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <16 x i1> %3, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %1
   ; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
-  store <16 x i8> %4, <16 x i8>* %d
+  store <16 x i8> %4, ptr %d
   ; CHECK-DAG: st.b [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_s_v16i8
 }
 
-define void @bseli_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
-                        <8 x i16>* %c) nounwind {
+define void @bseli_s_v8i16(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bseli_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sgt <8 x i16> %1, %2
   ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <8 x i1> %3, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %1
   ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
   ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
-  store <8 x i16> %4, <8 x i16>* %d
+  store <8 x i16> %4, ptr %d
   ; CHECK-DAG: st.h [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_s_v8i16
 }
 
-define void @bseli_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
-                        <4 x i32>* %c) nounwind {
+define void @bseli_s_v4i32(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bseli_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sgt <4 x i32> %1, %2
   ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <4 x i1> %3, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> %1
   ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
   ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
-  store <4 x i32> %4, <4 x i32>* %d
+  store <4 x i32> %4, ptr %d
   ; CHECK-DAG: st.w [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_s_v4i32
 }
 
-define void @bseli_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
-                        <2 x i64>* %c) nounwind {
+define void @bseli_s_v2i64(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bseli_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sgt <2 x i64> %1, %2
   ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <2 x i1> %3, <2 x i64> <i64 1, i64 1>, <2 x i64> %1
   ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
   ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
-  store <2 x i64> %4, <2 x i64>* %d
+  store <2 x i64> %4, ptr %d
   ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_s_v2i64
 }
 
-define void @bseli_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
-                        <16 x i8>* %c) nounwind {
+define void @bseli_u_v16i8(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bseli_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ugt <16 x i8> %1, %2
   ; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <16 x i1> %3, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <16 x i8> %1
   ; CHECK-DAG: bseli.b [[R4]], [[R1]], 1
-  store <16 x i8> %4, <16 x i8>* %d
+  store <16 x i8> %4, ptr %d
   ; CHECK-DAG: st.b [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_u_v16i8
 }
 
-define void @bseli_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
-                        <8 x i16>* %c) nounwind {
+define void @bseli_u_v8i16(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bseli_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ugt <8 x i16> %1, %2
   ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <8 x i1> %3, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>, <8 x i16> %1
   ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
   ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
-  store <8 x i16> %4, <8 x i16>* %d
+  store <8 x i16> %4, ptr %d
   ; CHECK-DAG: st.h [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_u_v8i16
 }
 
-define void @bseli_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
-                        <4 x i32>* %c) nounwind {
+define void @bseli_u_v4i32(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bseli_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ugt <4 x i32> %1, %2
   ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <4 x i1> %3, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> %1
   ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
   ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
-  store <4 x i32> %4, <4 x i32>* %d
+  store <4 x i32> %4, ptr %d
   ; CHECK-DAG: st.w [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_u_v4i32
 }
 
-define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
-                        <2 x i64>* %c) nounwind {
+define void @bseli_u_v2i64(ptr %d, ptr %a, ptr %b,
+                        ptr %c) nounwind {
   ; CHECK: bseli_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ugt <2 x i64> %1, %2
   ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <2 x i1> %3, <2 x i64> <i64 1, i64 1>, <2 x i64> %1
   ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
   ; CHECK-DAG: bsel.v [[R4]], [[R1]], [[R3]]
-  store <2 x i64> %4, <2 x i64>* %d
+  store <2 x i64> %4, ptr %d
   ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_u_v2i64
 }
 
-define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @max_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sgt <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
   ; CHECK-DAG: max_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_s_v16i8
 }
 
-define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @max_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sgt <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
   ; CHECK-DAG: max_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_s_v8i16
 }
 
-define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @max_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sgt <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
   ; CHECK-DAG: max_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_s_v4i32
 }
 
-define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @max_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sgt <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
   ; CHECK-DAG: max_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_s_v2i64
 }
 
-define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @max_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ugt <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
   ; CHECK-DAG: max_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_u_v16i8
 }
 
-define void @max_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @max_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ugt <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
   ; CHECK-DAG: max_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_u_v8i16
 }
 
-define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @max_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ugt <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
   ; CHECK-DAG: max_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_u_v4i32
 }
 
-define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @max_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ugt <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
   ; CHECK-DAG: max_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_u_v2i64
 }
 
-define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @max_s_eq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_s_eq_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sge <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
   ; CHECK-DAG: max_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_s_eq_v16i8
 }
 
-define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @max_s_eq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_s_eq_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sge <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
   ; CHECK-DAG: max_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_s_eq_v8i16
 }
 
-define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @max_s_eq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_s_eq_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sge <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
   ; CHECK-DAG: max_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_s_eq_v4i32
 }
 
-define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @max_s_eq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_s_eq_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sge <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
   ; CHECK-DAG: max_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_s_eq_v2i64
 }
 
-define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @max_u_eq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_u_eq_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp uge <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
   ; CHECK-DAG: max_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_u_eq_v16i8
 }
 
-define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @max_u_eq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_u_eq_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp uge <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
   ; CHECK-DAG: max_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_u_eq_v8i16
 }
 
-define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @max_u_eq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_u_eq_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp uge <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
   ; CHECK-DAG: max_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_u_eq_v4i32
 }
 
-define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @max_u_eq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_u_eq_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp uge <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
   ; CHECK-DAG: max_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_u_eq_v2i64
 }
 
-define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @maxi_s_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sgt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ; CHECK-DAG: maxi_s.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_s_v16i8
 }
 
-define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @maxi_s_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sgt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ; CHECK-DAG: maxi_s.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_s_v8i16
 }
 
-define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @maxi_s_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sgt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: maxi_s.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_s_v4i32
 }
 
-define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @maxi_s_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sgt <2 x i64> %1, <i64 1, i64 1>
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
   ; CHECK-DAG: maxi_s.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_s_v2i64
 }
 
-define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @maxi_u_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ugt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ; CHECK-DAG: maxi_u.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_u_v16i8
 }
 
-define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @maxi_u_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ugt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ; CHECK-DAG: maxi_u.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_u_v8i16
 }
 
-define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @maxi_u_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ugt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: maxi_u.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_u_v4i32
 }
 
-define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @maxi_u_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ugt <2 x i64> %1, <i64 1, i64 1>
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
   ; CHECK-DAG: maxi_u.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_u_v2i64
 }
 
-define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @maxi_s_eq_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_s_eq_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ; CHECK-DAG: maxi_s.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_s_eq_v16i8
 }
 
-define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @maxi_s_eq_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_s_eq_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ; CHECK-DAG: maxi_s.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_s_eq_v8i16
 }
 
-define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @maxi_s_eq_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_s_eq_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: maxi_s.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_s_eq_v4i32
 }
 
-define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @maxi_s_eq_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_s_eq_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sge <2 x i64> %1, <i64 1, i64 1>
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
   ; CHECK-DAG: maxi_s.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_s_eq_v2i64
 }
 
-define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @maxi_u_eq_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_u_eq_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp uge <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ; CHECK-DAG: maxi_u.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_u_eq_v16i8
 }
 
-define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @maxi_u_eq_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_u_eq_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp uge <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ; CHECK-DAG: maxi_u.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_u_eq_v8i16
 }
 
-define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @maxi_u_eq_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_u_eq_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp uge <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: maxi_u.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_u_eq_v4i32
 }
 
-define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @maxi_u_eq_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: maxi_u_eq_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp uge <2 x i64> %1, <i64 1, i64 1>
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
   ; CHECK-DAG: maxi_u.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size maxi_u_eq_v2i64
 }
 
-define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @min_s_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
   ; CHECK-DAG: min_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_s_v16i8
 }
 
-define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @min_s_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
   ; CHECK-DAG: min_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_s_v8i16
 }
 
-define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @min_s_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
   ; CHECK-DAG: min_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_s_v4i32
 }
 
-define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @min_s_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
   ; CHECK-DAG: min_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_s_v2i64
 }
 
-define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @min_u_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
   ; CHECK-DAG: min_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_u_v16i8
 }
 
-define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @min_u_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
   ; CHECK-DAG: min_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_u_v8i16
 }
 
-define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @min_u_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
   ; CHECK-DAG: min_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_u_v4i32
 }
 
-define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @min_u_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
   ; CHECK-DAG: min_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_u_v2i64
 }
 
-define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @min_s_eq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_s_eq_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
   ; CHECK-DAG: min_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_s_eq_v16i8
 }
 
-define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @min_s_eq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_s_eq_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
   ; CHECK-DAG: min_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_s_eq_v8i16
 }
 
-define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @min_s_eq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_s_eq_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
   ; CHECK-DAG: min_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_s_eq_v4i32
 }
 
-define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @min_s_eq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_s_eq_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
   ; CHECK-DAG: min_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_s_eq_v2i64
 }
 
-define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @min_u_eq_v16i8(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_u_eq_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
   ; CHECK-DAG: min_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %4, <16 x i8>* %c
+  store <16 x i8> %4, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_u_eq_v16i8
 }
 
-define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @min_u_eq_v8i16(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_u_eq_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
   ; CHECK-DAG: min_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %4, <8 x i16>* %c
+  store <8 x i16> %4, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_u_eq_v8i16
 }
 
-define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @min_u_eq_v4i32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_u_eq_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
   ; CHECK-DAG: min_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_u_eq_v4i32
 }
 
-define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @min_u_eq_v2i64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_u_eq_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
   ; CHECK-DAG: min_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_u_eq_v2i64
 }
 
-define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @mini_s_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_s_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ; CHECK-DAG: mini_s.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_s_v16i8
 }
 
-define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @mini_s_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_s_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ; CHECK-DAG: mini_s.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_s_v8i16
 }
 
-define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @mini_s_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_s_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: mini_s.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_s_v4i32
 }
 
-define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @mini_s_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_s_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <2 x i64> %1, <i64 1, i64 1>
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
   ; CHECK-DAG: mini_s.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_s_v2i64
 }
 
-define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @mini_u_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_u_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ; CHECK-DAG: mini_u.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_u_v16i8
 }
 
-define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @mini_u_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_u_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ; CHECK-DAG: mini_u.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_u_v8i16
 }
 
-define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @mini_u_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_u_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: mini_u.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_u_v4i32
 }
 
-define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @mini_u_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_u_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <2 x i64> %1, <i64 1, i64 1>
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
   ; CHECK-DAG: mini_u.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_u_v2i64
 }
 
-define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @mini_s_eq_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_s_eq_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ; CHECK-DAG: mini_s.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_s_eq_v16i8
 }
 
-define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @mini_s_eq_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_s_eq_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ; CHECK-DAG: mini_s.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_s_eq_v8i16
 }
 
-define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @mini_s_eq_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_s_eq_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: mini_s.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_s_eq_v4i32
 }
 
-define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @mini_s_eq_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_s_eq_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <2 x i64> %1, <i64 1, i64 1>
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
   ; CHECK-DAG: mini_s.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_s_eq_v2i64
 }
 
-define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @mini_u_eq_v16i8(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_u_eq_v16i8:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ; CHECK-DAG: mini_u.b [[R3:\$w[0-9]+]], [[R1]], 1
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_u_eq_v16i8
 }
 
-define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @mini_u_eq_v8i16(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_u_eq_v8i16:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   ; CHECK-DAG: mini_u.h [[R3:\$w[0-9]+]], [[R1]], 1
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_u_eq_v8i16
 }
 
-define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @mini_u_eq_v4i32(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_u_eq_v4i32:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: mini_u.w [[R3:\$w[0-9]+]], [[R1]], 1
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size mini_u_eq_v4i32
 }
 
-define void @mini_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @mini_u_eq_v2i64(ptr %c, ptr %a) nounwind {
   ; CHECK: mini_u_eq_v2i64:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <2 x i64> %1, <i64 1, i64 1>
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64> <i64 1, i64 1>
   ; CHECK-DAG: mini_u.d [[R3:\$w[0-9]+]], [[R1]], 1
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void

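The conversion applied throughout these hunks is purely mechanical: under
opaque pointers every pointer value is spelled ptr, and the pointee type is
carried only by the operation that uses it (here, the load and store value
types), so the generated code and all CHECK lines are unchanged. A minimal
before/after sketch, using a hypothetical function of the same shape as the
tests above:

  ; Typed pointers: the pointee type is repeated on the pointer operand.
  define void @example(<4 x i32>* %p) {
    %v = load <4 x i32>, <4 x i32>* %p
    store <4 x i32> %v, <4 x i32>* %p
    ret void
  }

  ; Opaque pointers: the pointer is untyped; the value type stays on the op.
  define void @example(ptr %p) {
    %v = load <4 x i32>, ptr %p
    store <4 x i32> %v, ptr %p
    ret void
  }

Since only the IR spelling changes, a converted test can be re-checked the
usual way (assuming a configured build tree):

  llvm-lit -v llvm/test/CodeGen/Mips/msa/compare_float.ll
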
diff --git a/llvm/test/CodeGen/Mips/msa/compare_float.ll b/llvm/test/CodeGen/Mips/msa/compare_float.ll
index 2a19ada4d2b63..cd4924eca44cd 100644
--- a/llvm/test/CodeGen/Mips/msa/compare_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/compare_float.ll
@@ -6,14 +6,14 @@ declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind
 declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
 declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
 
-define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @false_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: false_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
-  %2 = load <4 x float>, <4 x float>* %b
+  %1 = load <4 x float>, ptr %a
+  %2 = load <4 x float>, ptr %b
   %3 = fcmp false <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ret void
 
   ; (setcc $a, $b, SETFALSE) is always folded, so we won't get fcaf:
@@ -22,14 +22,14 @@ define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
   ; CHECK: .size false_v4f32
 }
 
-define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @false_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: false_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
-  %2 = load <2 x double>, <2 x double>* %b
+  %1 = load <2 x double>, ptr %a
+  %2 = load <2 x double>, ptr %b
   %3 = fcmp false <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ret void
 
   ; (setcc $a, $b, SETFALSE) is always folded
@@ -38,456 +38,456 @@ define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) noun
   ; CHECK: .size false_v2f64
 }
 
-define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @oeq_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: oeq_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp oeq <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fceq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size oeq_v4f32
 }
 
-define void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @oeq_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: oeq_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp oeq <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fceq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size oeq_v2f64
 }
 
-define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @oge_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: oge_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp oge <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size oge_v4f32
 }
 
-define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @oge_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: oge_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp oge <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size oge_v2f64
 }
 
-define void @ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ogt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ogt_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ogt <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ogt_v4f32
 }
 
-define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ogt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ogt_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ogt <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ogt_v2f64
 }
 
-define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ole_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ole_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ole <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ole_v4f32
 }
 
-define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ole_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ole_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ole <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcle.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ole_v2f64
 }
 
-define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @olt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: olt_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp olt <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fclt.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size olt_v4f32
 }
 
-define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @olt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: olt_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp olt <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fclt.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size olt_v2f64
 }
 
-define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @one_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: one_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp one <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcne.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size one_v4f32
 }
 
-define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @one_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: one_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp one <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcne.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size one_v2f64
 }
 
-define void @ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ord_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ord_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ord <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcor.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ord_v4f32
 }
 
-define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ord_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ord_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ord <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcor.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ord_v2f64
 }
 
-define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ueq_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ueq_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ueq <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcueq.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ueq_v4f32
 }
 
-define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ueq_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ueq_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ueq <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcueq.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ueq_v2f64
 }
 
-define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @uge_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: uge_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp uge <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size uge_v4f32
 }
 
-define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @uge_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: uge_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp uge <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size uge_v2f64
 }
 
-define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ugt_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ugt_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ugt <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ugt_v4f32
 }
 
-define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ugt_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ugt_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ugt <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ugt_v2f64
 }
 
-define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ule_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ule_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ule <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcule.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ule_v4f32
 }
 
-define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ule_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ule_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ule <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcule.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ule_v2f64
 }
 
-define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @ult_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ult_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ult <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcult.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ult_v4f32
 }
 
-define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @ult_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: ult_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ult <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcult.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size ult_v2f64
 }
 
-define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @uno_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: uno_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp uno <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   ; CHECK-DAG: fcun.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size uno_v4f32
 }
 
-define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @uno_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: uno_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp uno <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   ; CHECK-DAG: fcun.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size uno_v2f64
 }
 
-define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @true_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: true_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
-  %2 = load <4 x float>, <4 x float>* %b
+  %1 = load <4 x float>, ptr %a
+  %2 = load <4 x float>, ptr %b
   %3 = fcmp true <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
-  store <4 x i32> %4, <4 x i32>* %c
+  store <4 x i32> %4, ptr %c
   ret void
 
   ; (setcc $a, $b, SETTRUE) is always folded, so we won't get fcaf:
@@ -496,14 +496,14 @@ define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwin
   ; CHECK: .size true_v4f32
 }
 
-define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @true_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: true_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
-  %2 = load <2 x double>, <2 x double>* %b
+  %1 = load <2 x double>, ptr %a
+  %2 = load <2 x double>, ptr %b
   %3 = fcmp true <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
-  store <2 x i64> %4, <2 x i64>* %c
+  store <2 x i64> %4, ptr %c
   ret void
 
   ; (setcc $a, $b, SETTRUE) is always folded.
@@ -512,148 +512,148 @@ define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounw
   ; CHECK: .size true_v2f64
 }
 
-define void @bsel_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
-                          <4 x float>* %c) nounwind {
+define void @bsel_v4f32(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
   ; CHECK: bsel_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <4 x float>, <4 x float>* %c
+  %3 = load <4 x float>, ptr %c
   ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
   %4 = fcmp ogt <4 x float> %1, %2
   ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <4 x i1> %4, <4 x float> %1, <4 x float> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
-  store <4 x float> %5, <4 x float>* %d
+  store <4 x float> %5, ptr %d
   ; CHECK-DAG: st.w [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bsel_v4f32
 }
 
-define void @bsel_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
-                          <2 x double>* %c) nounwind {
+define void @bsel_v2f64(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
   ; CHECK: bsel_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <2 x double>, <2 x double>* %c
+  %3 = load <2 x double>, ptr %c
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
   %4 = fcmp ogt <2 x double> %1, %2
   ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %5 = select <2 x i1> %4, <2 x double> %1, <2 x double> %3
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3]], [[R1]]
-  store <2 x double> %5, <2 x double>* %d
+  store <2 x double> %5, ptr %d
   ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bsel_v2f64
 }
 
-define void @bseli_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
-                          <4 x float>* %c) nounwind {
+define void @bseli_v4f32(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
   ; CHECK: bseli_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ogt <4 x float> %1, %2
   ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <4 x i1> %3, <4 x float> %1, <4 x float> zeroinitializer
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]]
-  store <4 x float> %4, <4 x float>* %d
+  store <4 x float> %4, ptr %d
   ; CHECK-DAG: st.w [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_v4f32
 }
 
-define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
-                          <2 x double>* %c) nounwind {
+define void @bseli_v2f64(ptr %d, ptr %a, ptr %b,
+                          ptr %c) nounwind {
   ; CHECK: bseli_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp ogt <2 x double> %1, %2
   ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]]
   %4 = select <2 x i1> %3, <2 x double> %1, <2 x double> zeroinitializer
   ; Note that IfSet and IfClr are swapped since the condition is inverted
   ; CHECK-DAG: bsel.v [[R4]], [[R3:\$w[0-9]+]], [[R1]]
-  store <2 x double> %4, <2 x double>* %d
+  store <2 x double> %4, ptr %d
   ; CHECK-DAG: st.d [[R4]], 0($4)
 
   ret void
   ; CHECK: .size bseli_v2f64
 }
 
-define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @max_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %1, <4 x float> %2)
   ; CHECK-DAG: fmax.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x float> %3, <4 x float>* %c
+  store <4 x float> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_v4f32
 }
 
-define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @max_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: max_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %1, <2 x double> %2)
   ; CHECK-DAG: fmax.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x double> %3, <2 x double>* %c
+  store <2 x double> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
   ; CHECK: .size max_v2f64
 }
 
-define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
+define void @min_v4f32(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_v4f32:
 
-  %1 = load <4 x float>, <4 x float>* %a
+  %1 = load <4 x float>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>, <4 x float>* %b
+  %2 = load <4 x float>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %1, <4 x float> %2)
   ; CHECK-DAG: fmin.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x float> %3, <4 x float>* %c
+  store <4 x float> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
   ; CHECK: .size min_v4f32
 }
 
-define void @min_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
+define void @min_v2f64(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK: min_v2f64:
 
-  %1 = load <2 x double>, <2 x double>* %a
+  %1 = load <2 x double>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>, <2 x double>* %b
+  %2 = load <2 x double>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %1, <2 x double> %2)
   ; CHECK-DAG: fmin.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x double> %3, <2 x double>* %c
+  store <2 x double> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void

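Two MSA details are encoded in the CHECK lines of compare_float.ll. MSA has
no greater-than floating-point compare instructions (only fceq/fclt/fcle and
their unordered fcu* counterparts), so the oge/ogt and uge/ugt predicates
are matched by swapping the operands, e.g. in oge_v4f32 above:

  %3 = fcmp oge <4 x float> %1, %2
  ; CHECK-DAG: fcle.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
  ; (b <= a is the same as a >= b, hence the reversed operands)

Likewise, in the bsel tests the compare produces the inverted condition, so
the IfSet and IfClr operands of bsel.v are swapped relative to the select,
exactly as the inline comments note.
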
diff --git a/llvm/test/CodeGen/Mips/msa/elm_copy.ll b/llvm/test/CodeGen/Mips/msa/elm_copy.ll
index 908c23421bb0e..6e0ee2da0920f 100644
--- a/llvm/test/CodeGen/Mips/msa/elm_copy.ll
+++ b/llvm/test/CodeGen/Mips/msa/elm_copy.ll
@@ -15,9 +15,9 @@
 
 define void @llvm_mips_copy_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_s_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_copy_s_b_ARG1
   %1 = tail call i32 @llvm.mips.copy.s.b(<16 x i8> %0, i32 1)
-  store i32 %1, i32* @llvm_mips_copy_s_b_RES
+  store i32 %1, ptr @llvm_mips_copy_s_b_RES
   ret void
 }
 
@@ -38,9 +38,9 @@ declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_copy_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_copy_s_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_copy_s_h_ARG1
   %1 = tail call i32 @llvm.mips.copy.s.h(<8 x i16> %0, i32 1)
-  store i32 %1, i32* @llvm_mips_copy_s_h_RES
+  store i32 %1, ptr @llvm_mips_copy_s_h_RES
   ret void
 }
 
@@ -61,9 +61,9 @@ declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_copy_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_s_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_copy_s_w_ARG1
   %1 = tail call i32 @llvm.mips.copy.s.w(<4 x i32> %0, i32 1)
-  store i32 %1, i32* @llvm_mips_copy_s_w_RES
+  store i32 %1, ptr @llvm_mips_copy_s_w_RES
   ret void
 }
 
@@ -84,9 +84,9 @@ declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_copy_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_s_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_copy_s_d_ARG1
   %1 = tail call i64 @llvm.mips.copy.s.d(<2 x i64> %0, i32 1)
-  store i64 %1, i64* @llvm_mips_copy_s_d_RES
+  store i64 %1, ptr @llvm_mips_copy_s_d_RES
   ret void
 }
 
@@ -112,9 +112,9 @@ declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_copy_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_u_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_copy_u_b_ARG1
   %1 = tail call i32 @llvm.mips.copy.u.b(<16 x i8> %0, i32 1)
-  store i32 %1, i32* @llvm_mips_copy_u_b_RES
+  store i32 %1, ptr @llvm_mips_copy_u_b_RES
   ret void
 }
 
@@ -135,9 +135,9 @@ declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_copy_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_copy_u_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_copy_u_h_ARG1
   %1 = tail call i32 @llvm.mips.copy.u.h(<8 x i16> %0, i32 1)
-  store i32 %1, i32* @llvm_mips_copy_u_h_RES
+  store i32 %1, ptr @llvm_mips_copy_u_h_RES
   ret void
 }
 
@@ -158,9 +158,9 @@ declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_copy_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_u_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_copy_u_w_ARG1
   %1 = tail call i32 @llvm.mips.copy.u.w(<4 x i32> %0, i32 1)
-  store i32 %1, i32* @llvm_mips_copy_u_w_RES
+  store i32 %1, ptr @llvm_mips_copy_u_w_RES
   ret void
 }
 
@@ -182,9 +182,9 @@ declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_copy_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_u_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_copy_u_d_ARG1
   %1 = tail call i64 @llvm.mips.copy.u.d(<2 x i64> %0, i32 1)
-  store i64 %1, i64* @llvm_mips_copy_u_d_RES
+  store i64 %1, ptr @llvm_mips_copy_u_d_RES
   ret void
 }
 

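The same spelling change applies to pointers to globals: a typed operand
like <16 x i8>* @llvm_mips_copy_s_b_ARG1 becomes ptr
@llvm_mips_copy_s_b_ARG1, while the global definition itself keeps its
value type, and the MSA intrinsic declarations are untouched because they
take and return vectors rather than pointers. A hypothetical global of the
same shape:

  @G = global <16 x i8> zeroinitializer
  ; After conversion the load names the value type, not the pointer:
  %v = load <16 x i8>, ptr @G
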
diff --git a/llvm/test/CodeGen/Mips/msa/elm_insv.ll b/llvm/test/CodeGen/Mips/msa/elm_insv.ll
index a94a105e50056..6c00483cf6537 100644
--- a/llvm/test/CodeGen/Mips/msa/elm_insv.ll
+++ b/llvm/test/CodeGen/Mips/msa/elm_insv.ll
@@ -16,10 +16,10 @@
 
 define void @llvm_mips_insert_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insert_b_ARG1
-  %1 = load i32, i32* @llvm_mips_insert_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_insert_b_ARG1
+  %1 = load i32, ptr @llvm_mips_insert_b_ARG3
   %2 = tail call <16 x i8> @llvm.mips.insert.b(<16 x i8> %0, i32 1, i32 %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_insert_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_insert_b_RES
   ret void
 }
 
@@ -38,10 +38,10 @@ declare <16 x i8> @llvm.mips.insert.b(<16 x i8>, i32, i32) nounwind
 
 define void @llvm_mips_insert_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insert_h_ARG1
-  %1 = load i32, i32* @llvm_mips_insert_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_insert_h_ARG1
+  %1 = load i32, ptr @llvm_mips_insert_h_ARG3
   %2 = tail call <8 x i16> @llvm.mips.insert.h(<8 x i16> %0, i32 1, i32 %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_insert_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_insert_h_RES
   ret void
 }
 
@@ -60,10 +60,10 @@ declare <8 x i16> @llvm.mips.insert.h(<8 x i16>, i32, i32) nounwind
 
 define void @llvm_mips_insert_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insert_w_ARG1
-  %1 = load i32, i32* @llvm_mips_insert_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_insert_w_ARG1
+  %1 = load i32, ptr @llvm_mips_insert_w_ARG3
   %2 = tail call <4 x i32> @llvm.mips.insert.w(<4 x i32> %0, i32 1, i32 %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_insert_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_insert_w_RES
   ret void
 }
 
@@ -82,10 +82,10 @@ declare <4 x i32> @llvm.mips.insert.w(<4 x i32>, i32, i32) nounwind
 
 define void @llvm_mips_insert_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insert_d_ARG1
-  %1 = load i64, i64* @llvm_mips_insert_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_insert_d_ARG1
+  %1 = load i64, ptr @llvm_mips_insert_d_ARG3
   %2 = tail call <2 x i64> @llvm.mips.insert.d(<2 x i64> %0, i32 1, i64 %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_insert_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_insert_d_RES
   ret void
 }
 
@@ -110,10 +110,10 @@ declare <2 x i64> @llvm.mips.insert.d(<2 x i64>, i32, i64) nounwind
 
 define void @llvm_mips_insve_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_insve_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_insve_b_ARG3
   %2 = tail call <16 x i8> @llvm.mips.insve.b(<16 x i8> %0, i32 1, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_insve_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_insve_b_RES
   ret void
 }
 
@@ -136,10 +136,10 @@ declare <16 x i8> @llvm.mips.insve.b(<16 x i8>, i32, <16 x i8>) nounwind
 
 define void @llvm_mips_insve_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_insve_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_insve_h_ARG3
   %2 = tail call <8 x i16> @llvm.mips.insve.h(<8 x i16> %0, i32 1, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_insve_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_insve_h_RES
   ret void
 }
 
@@ -162,10 +162,10 @@ declare <8 x i16> @llvm.mips.insve.h(<8 x i16>, i32, <8 x i16>) nounwind
 
 define void @llvm_mips_insve_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_insve_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_insve_w_ARG3
   %2 = tail call <4 x i32> @llvm.mips.insve.w(<4 x i32> %0, i32 1, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_insve_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_insve_w_RES
   ret void
 }
 
@@ -188,10 +188,10 @@ declare <4 x i32> @llvm.mips.insve.w(<4 x i32>, i32, <4 x i32>) nounwind
 
 define void @llvm_mips_insve_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_insve_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_insve_d_ARG3
   %2 = tail call <2 x i64> @llvm.mips.insve.d(<2 x i64> %0, i32 1, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_insve_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_insve_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/elm_move.ll b/llvm/test/CodeGen/Mips/msa/elm_move.ll
index d2e2197d6536a..4065fc753a55e 100644
--- a/llvm/test/CodeGen/Mips/msa/elm_move.ll
+++ b/llvm/test/CodeGen/Mips/msa/elm_move.ll
@@ -9,9 +9,9 @@
 
 define void @llvm_mips_move_vb_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_move_vb_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_move_vb_ARG1
   %1 = tail call <16 x i8> @llvm.mips.move.v(<16 x i8> %0)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_move_vb_RES
+  store <16 x i8> %1, ptr @llvm_mips_move_vb_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll b/llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll
index 4ccf61c257302..548cdf394ac85 100644
--- a/llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll
+++ b/llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll
@@ -10,10 +10,10 @@
 
 define void @llvm_mips_sldi_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_sldi_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_sldi_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, <16 x i8> %1, i32 1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_sldi_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_sldi_b_RES
   ret void
 }
 
@@ -31,10 +31,10 @@ declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, <16 x i8>, i32) nounwind
 
 define void @llvm_mips_sldi_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_sldi_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_sldi_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, <8 x i16> %1, i32 1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_sldi_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_sldi_h_RES
   ret void
 }
 
@@ -52,10 +52,10 @@ declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, <8 x i16>, i32) nounwind
 
 define void @llvm_mips_sldi_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_sldi_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_sldi_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, <4 x i32> %1, i32 1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_sldi_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_sldi_w_RES
   ret void
 }
 
@@ -73,10 +73,10 @@ declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, <4 x i32>, i32) nounwind
 
 define void @llvm_mips_sldi_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_sldi_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_sldi_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, <2 x i64> %1, i32 1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_sldi_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_sldi_d_RES
   ret void
 }
 
@@ -93,9 +93,9 @@ declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, <2 x i64>, i32) nounwind
 
 define void @llvm_mips_splati_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splati_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_splati_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.splati.b(<16 x i8> %0, i32 1)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_splati_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_splati_b_RES
   ret void
 }
 
@@ -112,9 +112,9 @@ declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_splati_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splati_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_splati_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.splati.h(<8 x i16> %0, i32 1)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_splati_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_splati_h_RES
   ret void
 }
 
@@ -131,9 +131,9 @@ declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_splati_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splati_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_splati_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.splati.w(<4 x i32> %0, i32 1)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_splati_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_splati_w_RES
   ret void
 }
 
@@ -150,9 +150,9 @@ declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_splati_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splati_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_splati_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.splati.d(<2 x i64> %0, i32 1)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_splati_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_splati_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/endian.ll b/llvm/test/CodeGen/Mips/msa/endian.ll
index 24e4fb6b24645..63aa3f6e18724 100644
--- a/llvm/test/CodeGen/Mips/msa/endian.ll
+++ b/llvm/test/CodeGen/Mips/msa/endian.ll
@@ -42,7 +42,7 @@ define void @const_v16i8() nounwind {
   ; BIGENDIAN: .byte 15
   ; BIGENDIAN: const_v16i8:
 
-  store volatile <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, <16 x i8>*@v16i8
+  store volatile <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, ptr @v16i8
 
   ret void
 }
@@ -67,7 +67,7 @@ define void @const_v8i16() nounwind {
   ; BIGENDIAN: .2byte 7
   ; BIGENDIAN: const_v8i16:
 
-  store volatile <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, <8 x i16>*@v8i16
+  store volatile <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, ptr @v8i16
 
   ret void
 }
@@ -84,7 +84,7 @@ define void @const_v4i32() nounwind {
   ; BIGENDIAN: .4byte 3
   ; BIGENDIAN: const_v4i32:
 
-  store volatile <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>*@v4i32
+  store volatile <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr @v4i32
 
   ret void
 }
@@ -101,7 +101,7 @@ define void @const_v2i64() nounwind {
   ; BIGENDIAN: .4byte 2
   ; BIGENDIAN: const_v2i64:
 
-  store volatile <2 x i64> <i64 1, i64 2>, <2 x i64>*@v2i64
+  store volatile <2 x i64> <i64 1, i64 2>, ptr @v2i64
 
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll b/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
index 513e108407b07..6a27c9f5dac9b 100644
--- a/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
+++ b/llvm/test/CodeGen/Mips/msa/f16-llvm-ir.ll
@@ -24,7 +24,7 @@
 
 @k = external global float
 
-declare float @k2(half *)
+declare float @k2(ptr)
 
 define void @f3(i16 %b) {
 ; MIPS32-LABEL: f3:
@@ -96,9 +96,9 @@ define void @f3(i16 %b) {
 entry:
   %0 = alloca half
   %1 = bitcast i16 %b to half
-  store half %1, half * %0
-  %2 = call float @k2(half * %0)
-  store float %2, float * @k
+  store half %1, ptr %0
+  %2 = call float @k2(ptr %0)
+  store float %2, ptr @k
   ret void
 }
 
@@ -158,7 +158,7 @@ define void  @f(i16 %b) {
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 16
   %1 = bitcast i16 %b to half
   %2 = fpext half %1 to float
-  store float %2, float * @k
+  store float %2, ptr @k
   ret void
 }
 
@@ -241,13 +241,13 @@ define void @fadd_f64() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load half, half * @h, align 2
+  %0 = load half, ptr @h, align 2
   %1 = fpext half %0 to double
-  %2 = load half, half * @h, align 2
+  %2 = load half, ptr @h, align 2
   %3 = fpext half %2 to double
   %add = fadd double %1, %3
   %4 = fptrunc double %add to half
-   store half %4, half * @h, align 2
+   store half %4, ptr @h, align 2
   ret void
 }
 
@@ -303,7 +303,7 @@ define i32 @ffptoui() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    mfc1 $2, $f0
 entry:
-  %0 = load half, half * @h, align 2
+  %0 = load half, ptr @h, align 2
   %1 = fptoui half %0 to i32
   ret i32 %1
 }
@@ -359,7 +359,7 @@ define i32 @ffptosi() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    mfc1 $2, $f0
 entry:
-  %0 = load half, half * @h, align 2
+  %0 = load half, ptr @h, align 2
   %1 = fptosi half %0 to i32
   ret i32 %1
 
@@ -456,7 +456,7 @@ entry:
 
 
   %0 = uitofp i32 %a to half
-  store half %0, half * @h, align 2
+  store half %0, ptr @h, align 2
   ret void
 }
 
@@ -522,18 +522,18 @@ define void @fadd() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
-  %2 = load i16, i16* @g, align 2
+  %2 = load i16, ptr @g, align 2
   %3 = call float @llvm.convert.from.fp16.f32(i16 %2)
   %add = fadd float %1, %3
 
 
  %4 = call i16 @llvm.convert.to.fp16.f32(float %add)
 
-   store i16 %4, i16* @g, align 2
+   store i16 %4, ptr @g, align 2
   ret void
 }
 
@@ -602,11 +602,11 @@ define void @fsub() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
-  %2 = load i16, i16* @g, align 2
+  %2 = load i16, ptr @g, align 2
   %3 = call float @llvm.convert.from.fp16.f32(i16 %2)
   %sub = fsub float %1, %3
 
@@ -614,7 +614,7 @@ entry:
   %4 = call i16 @llvm.convert.to.fp16.f32(float %sub)
 
 
-  store i16 %4, i16* @g, align 2
+  store i16 %4, ptr @g, align 2
   ret void
 }
 
@@ -676,11 +676,11 @@ define void @fmult() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
-  %2 = load i16, i16* @g, align 2
+  %2 = load i16, ptr @g, align 2
   %3 = call float @llvm.convert.from.fp16.f32(i16 %2)
   %mul = fmul float %1, %3
 
@@ -688,7 +688,7 @@ entry:
   %4 = call i16 @llvm.convert.to.fp16.f32(float %mul)
 
 
-  store i16 %4, i16* @g, align 2
+  store i16 %4, ptr @g, align 2
 
   ret void
 }
@@ -752,18 +752,18 @@ define void @fdiv() {
 ; MIPS64-N64-NEXT:    sh $2, 0($1)
 entry:
 
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
-  %2 = load i16, i16* @g, align 2
+  %2 = load i16, ptr @g, align 2
   %3 = call float @llvm.convert.from.fp16.f32(i16 %2)
   %div = fdiv float %1, %3
 
 
   %4 = call i16 @llvm.convert.to.fp16.f32(float %div)
 
-  store i16 %4, i16* @g, align 2
+  store i16 %4, ptr @g, align 2
   ret void
 }
 
@@ -864,11 +864,11 @@ define void @frem() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
-  %2 = load i16, i16* @g, align 2
+  %2 = load i16, ptr @g, align 2
   %3 = call float @llvm.convert.from.fp16.f32(i16 %2)
   %rem = frem float %1, %3
 
@@ -876,7 +876,7 @@ entry:
   %4 = call i16 @llvm.convert.to.fp16.f32(float %rem)
 
 
-  store i16 %4, i16* @g, align 2
+  store i16 %4, ptr @g, align 2
 
   ret void
 }
@@ -995,16 +995,16 @@ define void @fcmp() {
 ; MIPSR6-N64-NEXT:    jr $ra
 ; MIPSR6-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
-  %2 = load i16, i16* @g, align 2
+  %2 = load i16, ptr @g, align 2
   %3 = call float @llvm.convert.from.fp16.f32(i16 %2)
   %fcmp = fcmp oeq float %1, %3
 
 
   %4 = zext i1 %fcmp to i16
-  store i16 %4, i16* @i1, align 2
+  store i16 %4, ptr @i1, align 2
 
   ret void
 }
@@ -1069,7 +1069,7 @@ define void @fpowi() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -1079,7 +1079,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %powi)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
   ret void
 }
 
@@ -1180,7 +1180,7 @@ define void @fpowi_var(i32 %var) {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -1190,7 +1190,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %powi)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
   ret void
 }
 
@@ -1293,7 +1293,7 @@ define void @fpow(float %var) {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -1303,7 +1303,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %powi)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
   ret void
 }
 
@@ -1403,7 +1403,7 @@ define void @flog2() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -1411,7 +1411,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %log2)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -1512,7 +1512,7 @@ define void @flog10() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -1520,7 +1520,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %log10)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -1585,7 +1585,7 @@ define void @fsqrt() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -1593,7 +1593,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %sqrt)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -1694,7 +1694,7 @@ define void @fsin() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -1702,7 +1702,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %sin)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -1803,7 +1803,7 @@ define void @fcos() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -1811,7 +1811,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %cos)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -1912,14 +1912,14 @@ define void @fexp() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
   %exp = call float @llvm.exp.f32(float %1)
   %2 = call i16 @llvm.convert.to.fp16.f32(float %exp)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2020,7 +2020,7 @@ define void @fexp2() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -2028,7 +2028,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %exp2)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2136,7 +2136,7 @@ define void @ffma(float %b, float %c) {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -2144,7 +2144,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %fma)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2272,7 +2272,7 @@ define void @ffmuladd(float %b, float %c) {
 ; MIPSR6-N64-NEXT:    jr $ra
 ; MIPSR6-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 ; MIPS32-N32:     madd.s $f[[F1:[0-9]]], $f13, $f[[F0]], $f12
@@ -2282,7 +2282,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %fmuladd)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2347,7 +2347,7 @@ define void @ffabs() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -2355,7 +2355,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %fabs)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2459,7 +2459,7 @@ define void @fminnum(float %b) {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -2467,7 +2467,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %minnum)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2571,7 +2571,7 @@ define void @fmaxnum(float %b) {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -2579,7 +2579,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %maxnum)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2646,7 +2646,7 @@ define void @fcopysign(float %b) {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    sh $2, 0($1)
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -2654,7 +2654,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %copysign)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2755,7 +2755,7 @@ define void @ffloor() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -2763,7 +2763,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %floor)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2864,7 +2864,7 @@ define void @fceil() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -2872,7 +2872,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %ceil)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -2973,7 +2973,7 @@ define void @ftrunc() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -2981,7 +2981,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %trunc)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -3082,13 +3082,13 @@ define void @frint() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
   %rint = call float @llvm.rint.f32(float %1)
   %2 = call i16 @llvm.convert.to.fp16.f32(float %rint)
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -3189,7 +3189,7 @@ define void @fnearbyint() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -3197,7 +3197,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %nearbyint)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }
@@ -3298,7 +3298,7 @@ define void @fround() {
 ; MIPS64-N64-NEXT:    jr $ra
 ; MIPS64-N64-NEXT:    daddiu $sp, $sp, 32
 entry:
-  %0 = load i16, i16* @g, align 2
+  %0 = load i16, ptr @g, align 2
   %1 = call float @llvm.convert.from.fp16.f32(i16 %0)
 
 
@@ -3306,7 +3306,7 @@ entry:
   %2 = call i16 @llvm.convert.to.fp16.f32(float %round)
 
 
-  store i16 %2, i16* @g, align 2
+  store i16 %2, ptr @g, align 2
 
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/msa/fexuprl.ll b/llvm/test/CodeGen/Mips/msa/fexuprl.ll
index 2d0019c48003e..abd9d164d7c6b 100644
--- a/llvm/test/CodeGen/Mips/msa/fexuprl.ll
+++ b/llvm/test/CodeGen/Mips/msa/fexuprl.ll
@@ -8,15 +8,15 @@
 
 define i32 @test() local_unnamed_addr {
 entry:
-  %0 = load <8 x half>, <8 x half>* @g, align 16
+  %0 = load <8 x half>, ptr @g, align 16
   %1 = tail call <4 x float> @llvm.mips.fexupl.w(<8 x half> %0)
-  store <4 x float> %1, <4 x float>* @i, align 16
+  store <4 x float> %1, ptr @i, align 16
 ; CHECK: ld.h $w[[W0:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK: fexupl.w $w[[W1:[0-9]+]], $w[[W0]]
 ; CHECK: st.w $w[[W1]], 0(${{[0-9]+}})
 
   %2 = tail call <4 x float> @llvm.mips.fexupr.w(<8 x half> %0)
-  store <4 x float> %2, <4 x float>* @j, align 16
+  store <4 x float> %2, ptr @j, align 16
 
 ; CHECK: fexupr.w $w[[W2:[0-9]+]], $w[[W0]]
 ; CHECK: st.w $w[[W2]], 0(${{[0-9]+}})

diff --git a/llvm/test/CodeGen/Mips/msa/frameindex.ll b/llvm/test/CodeGen/Mips/msa/frameindex.ll
index 94c3fd85a565a..1ee527bd88a4d 100644
--- a/llvm/test/CodeGen/Mips/msa/frameindex.ll
+++ b/llvm/test/CodeGen/Mips/msa/frameindex.ll
@@ -5,9 +5,9 @@ define void @loadstore_v16i8_near() nounwind {
   ; CHECK: loadstore_v16i8_near:
 
   %1 = alloca <16 x i8>
-  %2 = load volatile <16 x i8>, <16 x i8>* %1
+  %2 = load volatile <16 x i8>, ptr %1
   ; CHECK: ld.b [[R1:\$w[0-9]+]], 0($sp)
-  store volatile <16 x i8> %2, <16 x i8>* %1
+  store volatile <16 x i8> %2, ptr %1
   ; CHECK: st.b [[R1]], 0($sp)
 
   ret void
@@ -21,9 +21,9 @@ define void @loadstore_v16i8_just_under_simm10() nounwind {
   %2 = alloca [492 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--right up to 512 bytes
 
-  %3 = load volatile <16 x i8>, <16 x i8>* %1
+  %3 = load volatile <16 x i8>, ptr %1
   ; CHECK: ld.b [[R1:\$w[0-9]+]], 496($sp)
-  store volatile <16 x i8> %3, <16 x i8>* %1
+  store volatile <16 x i8> %3, ptr %1
   ; CHECK: st.b [[R1]], 496($sp)
 
   ret void
@@ -37,10 +37,10 @@ define void @loadstore_v16i8_just_over_simm10() nounwind {
   %2 = alloca [497 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--right up to 512 bytes
 
-  %3 = load volatile <16 x i8>, <16 x i8>* %1
+  %3 = load volatile <16 x i8>, ptr %1
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512
   ; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <16 x i8> %3, <16 x i8>* %1
+  store volatile <16 x i8> %3, ptr %1
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512
   ; CHECK: st.b [[R1]], 0([[BASE]])
 
@@ -55,11 +55,11 @@ define void @loadstore_v16i8_just_under_simm16() nounwind {
   %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                            ; slot--right up to 32768 bytes
 
-  %3 = load volatile <16 x i8>, <16 x i8>* %1
+  %3 = load volatile <16 x i8>, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <16 x i8> %3, <16 x i8>* %1
+  store volatile <16 x i8> %3, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: st.b [[R1]], 0([[BASE]])
@@ -75,11 +75,11 @@ define void @loadstore_v16i8_just_over_simm16() nounwind {
   %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                            ; slot--just over 32768 bytes
 
-  %3 = load volatile <16 x i8>, <16 x i8>* %1
+  %3 = load volatile <16 x i8>, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <16 x i8> %3, <16 x i8>* %1
+  store volatile <16 x i8> %3, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: st.b [[R1]], 0([[BASE]])
@@ -92,9 +92,9 @@ define void @loadstore_v8i16_near() nounwind {
   ; CHECK: loadstore_v8i16_near:
 
   %1 = alloca <8 x i16>
-  %2 = load volatile <8 x i16>, <8 x i16>* %1
+  %2 = load volatile <8 x i16>, ptr %1
   ; CHECK: ld.h [[R1:\$w[0-9]+]], 0($sp)
-  store volatile <8 x i16> %2, <8 x i16>* %1
+  store volatile <8 x i16> %2, ptr %1
   ; CHECK: st.h [[R1]], 0($sp)
 
   ret void
@@ -105,15 +105,12 @@ define void @loadstore_v8i16_unaligned() nounwind {
   ; CHECK: loadstore_v8i16_unaligned:
 
   %1 = alloca [2 x <8 x i16>]
-  %2 = bitcast [2 x <8 x i16>]* %1 to i8*
-  %3 = getelementptr i8, i8* %2, i32 1
-  %4 = bitcast i8* %3 to [2 x <8 x i16>]*
-  %5 = getelementptr [2 x <8 x i16>], [2 x <8 x i16>]* %4, i32 0, i32 0
+  %2 = getelementptr i8, ptr %1, i32 1
 
-  %6 = load volatile <8 x i16>, <8 x i16>* %5
+  %3 = load volatile <8 x i16>, ptr %2
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
   ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <8 x i16> %6, <8 x i16>* %5
+  store volatile <8 x i16> %3, ptr %2
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
   ; CHECK: st.h [[R1]], 0([[BASE]])
 
@@ -128,9 +125,9 @@ define void @loadstore_v8i16_just_under_simm10() nounwind {
   %2 = alloca [1004 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 1024 bytes
 
-  %3 = load volatile <8 x i16>, <8 x i16>* %1
+  %3 = load volatile <8 x i16>, ptr %1
   ; CHECK: ld.h [[R1:\$w[0-9]+]], 1008($sp)
-  store volatile <8 x i16> %3, <8 x i16>* %1
+  store volatile <8 x i16> %3, ptr %1
   ; CHECK: st.h [[R1]], 1008($sp)
 
   ret void
@@ -144,10 +141,10 @@ define void @loadstore_v8i16_just_over_simm10() nounwind {
   %2 = alloca [1009 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 1024 bytes
 
-  %3 = load volatile <8 x i16>, <8 x i16>* %1
+  %3 = load volatile <8 x i16>, ptr %1
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024
   ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <8 x i16> %3, <8 x i16>* %1
+  store volatile <8 x i16> %3, ptr %1
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024
   ; CHECK: st.h [[R1]], 0([[BASE]])
 
@@ -162,11 +159,11 @@ define void @loadstore_v8i16_just_under_simm16() nounwind {
   %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                            ; slot--right up to 32768 bytes
 
-  %3 = load volatile <8 x i16>, <8 x i16>* %1
+  %3 = load volatile <8 x i16>, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <8 x i16> %3, <8 x i16>* %1
+  store volatile <8 x i16> %3, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: st.h [[R1]], 0([[BASE]])
@@ -182,11 +179,11 @@ define void @loadstore_v8i16_just_over_simm16() nounwind {
   %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                            ; slot--just over 32768 bytes
 
-  %3 = load volatile <8 x i16>, <8 x i16>* %1
+  %3 = load volatile <8 x i16>, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <8 x i16> %3, <8 x i16>* %1
+  store volatile <8 x i16> %3, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: st.h [[R1]], 0([[BASE]])
@@ -199,9 +196,9 @@ define void @loadstore_v4i32_near() nounwind {
   ; CHECK: loadstore_v4i32_near:
 
   %1 = alloca <4 x i32>
-  %2 = load volatile <4 x i32>, <4 x i32>* %1
+  %2 = load volatile <4 x i32>, ptr %1
   ; CHECK: ld.w [[R1:\$w[0-9]+]], 0($sp)
-  store volatile <4 x i32> %2, <4 x i32>* %1
+  store volatile <4 x i32> %2, ptr %1
   ; CHECK: st.w [[R1]], 0($sp)
 
   ret void
@@ -212,15 +209,12 @@ define void @loadstore_v4i32_unaligned() nounwind {
   ; CHECK: loadstore_v4i32_unaligned:
 
   %1 = alloca [2 x <4 x i32>]
-  %2 = bitcast [2 x <4 x i32>]* %1 to i8*
-  %3 = getelementptr i8, i8* %2, i32 1
-  %4 = bitcast i8* %3 to [2 x <4 x i32>]*
-  %5 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %4, i32 0, i32 0
+  %2 = getelementptr i8, ptr %1, i32 1
 
-  %6 = load volatile <4 x i32>, <4 x i32>* %5
+  %3 = load volatile <4 x i32>, ptr %2
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
   ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <4 x i32> %6, <4 x i32>* %5
+  store volatile <4 x i32> %3, ptr %2
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
   ; CHECK: st.w [[R1]], 0([[BASE]])
 
@@ -235,9 +229,9 @@ define void @loadstore_v4i32_just_under_simm10() nounwind {
   %2 = alloca [2028 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 2048 bytes
 
-  %3 = load volatile <4 x i32>, <4 x i32>* %1
+  %3 = load volatile <4 x i32>, ptr %1
   ; CHECK: ld.w [[R1:\$w[0-9]+]], 2032($sp)
-  store volatile <4 x i32> %3, <4 x i32>* %1
+  store volatile <4 x i32> %3, ptr %1
   ; CHECK: st.w [[R1]], 2032($sp)
 
   ret void
@@ -251,10 +245,10 @@ define void @loadstore_v4i32_just_over_simm10() nounwind {
   %2 = alloca [2033 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 2048 bytes
 
-  %3 = load volatile <4 x i32>, <4 x i32>* %1
+  %3 = load volatile <4 x i32>, ptr %1
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048
   ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <4 x i32> %3, <4 x i32>* %1
+  store volatile <4 x i32> %3, ptr %1
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048
   ; CHECK: st.w [[R1]], 0([[BASE]])
 
@@ -269,11 +263,11 @@ define void @loadstore_v4i32_just_under_simm16() nounwind {
   %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                            ; slot--right up to 32768 bytes
 
-  %3 = load volatile <4 x i32>, <4 x i32>* %1
+  %3 = load volatile <4 x i32>, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <4 x i32> %3, <4 x i32>* %1
+  store volatile <4 x i32> %3, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: st.w [[R1]], 0([[BASE]])
@@ -289,11 +283,11 @@ define void @loadstore_v4i32_just_over_simm16() nounwind {
   %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                            ; slot--just over 32768 bytes
 
-  %3 = load volatile <4 x i32>, <4 x i32>* %1
+  %3 = load volatile <4 x i32>, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <4 x i32> %3, <4 x i32>* %1
+  store volatile <4 x i32> %3, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: st.w [[R1]], 0([[BASE]])
@@ -306,9 +300,9 @@ define void @loadstore_v2i64_near() nounwind {
   ; CHECK: loadstore_v2i64_near:
 
   %1 = alloca <2 x i64>
-  %2 = load volatile <2 x i64>, <2 x i64>* %1
+  %2 = load volatile <2 x i64>, ptr %1
   ; CHECK: ld.d [[R1:\$w[0-9]+]], 0($sp)
-  store volatile <2 x i64> %2, <2 x i64>* %1
+  store volatile <2 x i64> %2, ptr %1
   ; CHECK: st.d [[R1]], 0($sp)
 
   ret void
@@ -319,15 +313,12 @@ define void @loadstore_v2i64_unaligned() nounwind {
   ; CHECK: loadstore_v2i64_unaligned:
 
   %1 = alloca [2 x <2 x i64>]
-  %2 = bitcast [2 x <2 x i64>]* %1 to i8*
-  %3 = getelementptr i8, i8* %2, i32 1
-  %4 = bitcast i8* %3 to [2 x <2 x i64>]*
-  %5 = getelementptr [2 x <2 x i64>], [2 x <2 x i64>]* %4, i32 0, i32 0
+  %2 = getelementptr i8, ptr %1, i32 1
 
-  %6 = load volatile <2 x i64>, <2 x i64>* %5
+  %3 = load volatile <2 x i64>, ptr %2
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
   ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <2 x i64> %6, <2 x i64>* %5
+  store volatile <2 x i64> %3, ptr %2
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
   ; CHECK: st.d [[R1]], 0([[BASE]])
 
@@ -341,9 +332,9 @@ define void @loadstore_v2i64_just_under_simm10() nounwind {
   %1 = alloca <2 x i64>
   %2 = alloca [4076 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 4096 bytes
-  %3 = load volatile <2 x i64>, <2 x i64>* %1
+  %3 = load volatile <2 x i64>, ptr %1
   ; CHECK: ld.d [[R1:\$w[0-9]+]], 4080($sp)
-  store volatile <2 x i64> %3, <2 x i64>* %1
+  store volatile <2 x i64> %3, ptr %1
   ; CHECK: st.d [[R1]], 4080($sp)
 
   ret void
@@ -357,10 +348,10 @@ define void @loadstore_v2i64_just_over_simm10() nounwind {
   %2 = alloca [4081 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 4096 bytes
 
-  %3 = load volatile <2 x i64>, <2 x i64>* %1
+  %3 = load volatile <2 x i64>, ptr %1
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096
   ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <2 x i64> %3, <2 x i64>* %1
+  store volatile <2 x i64> %3, ptr %1
   ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096
   ; CHECK: st.d [[R1]], 0([[BASE]])
 
@@ -375,11 +366,11 @@ define void @loadstore_v2i64_just_under_simm16() nounwind {
   %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                            ; slot--right up to 32768 bytes
 
-  %3 = load volatile <2 x i64>, <2 x i64>* %1
+  %3 = load volatile <2 x i64>, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <2 x i64> %3, <2 x i64>* %1
+  store volatile <2 x i64> %3, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: st.d [[R1]], 0([[BASE]])
@@ -395,11 +386,11 @@ define void @loadstore_v2i64_just_over_simm16() nounwind {
   %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                            ; slot--just over 32768 bytes
 
-  %3 = load volatile <2 x i64>, <2 x i64>* %1
+  %3 = load volatile <2 x i64>, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
-  store volatile <2 x i64> %3, <2 x i64>* %1
+  store volatile <2 x i64> %3, ptr %1
   ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
   ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
   ; CHECK: st.d [[R1]], 0([[BASE]])

diff --git a/llvm/test/CodeGen/Mips/msa/i10.ll b/llvm/test/CodeGen/Mips/msa/i10.ll
index 1047ddc9bb34a..e130d6df4b90c 100644
--- a/llvm/test/CodeGen/Mips/msa/i10.ll
+++ b/llvm/test/CodeGen/Mips/msa/i10.ll
@@ -7,7 +7,7 @@
 
 define i32 @llvm_mips_bnz_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_bnz_b_ARG1
   %1 = tail call i32 @llvm.mips.bnz.b(<16 x i8> %0)
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %true, label %false
@@ -28,7 +28,7 @@ declare i32 @llvm.mips.bnz.b(<16 x i8>) nounwind
 
 define i32 @llvm_mips_bnz_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnz_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_bnz_h_ARG1
   %1 = tail call i32 @llvm.mips.bnz.h(<8 x i16> %0)
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %true, label %false
@@ -49,7 +49,7 @@ declare i32 @llvm.mips.bnz.h(<8 x i16>) nounwind
 
 define i32 @llvm_mips_bnz_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnz_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_bnz_w_ARG1
   %1 = tail call i32 @llvm.mips.bnz.w(<4 x i32> %0)
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %true, label %false
@@ -70,7 +70,7 @@ declare i32 @llvm.mips.bnz.w(<4 x i32>) nounwind
 
 define i32 @llvm_mips_bnz_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnz_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_bnz_d_ARG1
   %1 = tail call i32 @llvm.mips.bnz.d(<2 x i64> %0)
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %true, label %false
@@ -93,9 +93,9 @@ declare i32 @llvm.mips.bnz.d(<2 x i64>) nounwind
 define void @llvm_mips_ldi_b_test() nounwind {
 entry:
   %0 = call <16 x i8> @llvm.mips.ldi.b(i32 3)
-  store <16 x i8> %0, <16 x i8>* @llvm_mips_ldi_b_RES1
+  store <16 x i8> %0, ptr @llvm_mips_ldi_b_RES1
   %1 = call <16 x i8> @llvm.mips.ldi.b(i32 -3)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_ldi_b_RES2
+  store <16 x i8> %1, ptr @llvm_mips_ldi_b_RES2
   ret void
 }
 
@@ -111,9 +111,9 @@ declare <16 x i8> @llvm.mips.ldi.b(i32)
 define void @llvm_mips_ldi_h_test() nounwind {
 entry:
   %0 = call <8 x i16> @llvm.mips.ldi.h(i32 3)
-  store <8 x i16> %0, <8 x i16>* @llvm_mips_ldi_h_RES1
+  store <8 x i16> %0, ptr @llvm_mips_ldi_h_RES1
   %1 = call <8 x i16> @llvm.mips.ldi.h(i32 -3)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_ldi_h_RES2
+  store <8 x i16> %1, ptr @llvm_mips_ldi_h_RES2
   ret void
 }
 
@@ -129,9 +129,9 @@ declare <8 x i16> @llvm.mips.ldi.h(i32)
 define void @llvm_mips_ldi_w_test() nounwind {
 entry:
   %0 = call <4 x i32> @llvm.mips.ldi.w(i32 3)
-  store <4 x i32> %0, <4 x i32>* @llvm_mips_ldi_w_RES1
+  store <4 x i32> %0, ptr @llvm_mips_ldi_w_RES1
   %1 = call <4 x i32> @llvm.mips.ldi.w(i32 -3)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ldi_w_RES2
+  store <4 x i32> %1, ptr @llvm_mips_ldi_w_RES2
   ret void
 }
 
@@ -147,9 +147,9 @@ declare <4 x i32> @llvm.mips.ldi.w(i32)
 define void @llvm_mips_ldi_d_test() nounwind {
 entry:
   %0 = call <2 x i64> @llvm.mips.ldi.d(i32 3)
-  store <2 x i64> %0, <2 x i64>* @llvm_mips_ldi_d_RES1
+  store <2 x i64> %0, ptr @llvm_mips_ldi_d_RES1
   %1 = call <2 x i64> @llvm.mips.ldi.d(i32 -3)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ldi_d_RES2
+  store <2 x i64> %1, ptr @llvm_mips_ldi_d_RES2
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/i5-a.ll b/llvm/test/CodeGen/Mips/msa/i5-a.ll
index be49efe31ef05..7fd14da4b5f06 100644
--- a/llvm/test/CodeGen/Mips/msa/i5-a.ll
+++ b/llvm/test/CodeGen/Mips/msa/i5-a.ll
@@ -9,9 +9,9 @@
 
 define void @llvm_mips_addvi_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addvi_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_addvi_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_addvi_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_addvi_b_RES
   ret void
 }
 
@@ -28,9 +28,9 @@ declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_addvi_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addvi_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_addvi_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_addvi_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_addvi_h_RES
   ret void
 }
 
@@ -47,9 +47,9 @@ declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_addvi_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addvi_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_addvi_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_addvi_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_addvi_w_RES
   ret void
 }
 
@@ -66,9 +66,9 @@ declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_addvi_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addvi_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_addvi_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_addvi_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_addvi_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/i5-b.ll b/llvm/test/CodeGen/Mips/msa/i5-b.ll
index 2dfea3f642ab5..4a36bfe540d14 100644
--- a/llvm/test/CodeGen/Mips/msa/i5-b.ll
+++ b/llvm/test/CodeGen/Mips/msa/i5-b.ll
@@ -21,9 +21,9 @@ define void @llvm_mips_bclri_b_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($1)
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclri_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_bclri_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_bclri_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_bclri_b_RES
   ret void
 }
 declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32) nounwind
@@ -44,9 +44,9 @@ define void @llvm_mips_bclri_h_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($1)
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclri_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_bclri_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_bclri_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_bclri_h_RES
   ret void
 }
 declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32) nounwind
@@ -67,9 +67,9 @@ define void @llvm_mips_bclri_w_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($1)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclri_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_bclri_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_bclri_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_bclri_w_RES
   ret void
 }
 declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32) nounwind
@@ -90,9 +90,9 @@ define void @llvm_mips_bclri_d_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($1)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclri_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_bclri_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_bclri_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_bclri_d_RES
   ret void
 }
 declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32) nounwind
@@ -116,10 +116,10 @@ define void @llvm_mips_binsli_b_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w1, 0($1)
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_binsli_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_binsli_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 6)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_binsli_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_binsli_b_RES
   ret void
 }
 declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32) nounwind
@@ -143,10 +143,10 @@ define void @llvm_mips_binsli_h_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w1, 0($1)
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_binsli_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_binsli_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %0, <8 x i16> %1, i32 7)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_binsli_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_binsli_h_RES
   ret void
 }
 declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32) nounwind
@@ -170,10 +170,10 @@ define void @llvm_mips_binsli_w_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w1, 0($1)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_binsli_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_binsli_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %0, <4 x i32> %1, i32 7)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_binsli_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_binsli_w_RES
   ret void
 }
 declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32) nounwind
@@ -197,14 +197,14 @@ define void @llvm_mips_binsli_d_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w1, 0($1)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_binsli_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_binsli_d_ARG2
   ; TODO: We use a particularly wide mask here to work around a legalization
   ;       issue. If the mask doesn't fit within a 10-bit immediate, it gets
   ;       legalized into a constant pool. We should add a test to cover the
   ;       other cases once they correctly select binsli.d.
   %2 = tail call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %0, <2 x i64> %1, i32 61)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_binsli_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_binsli_d_RES
   ret void
 }
 declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32) nounwind
@@ -228,10 +228,10 @@ define void @llvm_mips_binsri_b_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w1, 0($1)
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_binsri_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_binsri_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 6)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_binsri_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_binsri_b_RES
   ret void
 }
 declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32) nounwind
@@ -255,10 +255,10 @@ define void @llvm_mips_binsri_h_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w1, 0($1)
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_binsri_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_binsri_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %0, <8 x i16> %1, i32 7)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_binsri_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_binsri_h_RES
   ret void
 }
 declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32) nounwind
@@ -282,10 +282,10 @@ define void @llvm_mips_binsri_w_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w1, 0($1)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_binsri_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_binsri_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %0, <4 x i32> %1, i32 7)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_binsri_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_binsri_w_RES
   ret void
 }
 declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32) nounwind
@@ -309,10 +309,10 @@ define void @llvm_mips_binsri_d_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w1, 0($1)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_binsri_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_binsri_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %0, <2 x i64> %1, i32 7)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_binsri_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_binsri_d_RES
   ret void
 }
 declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32) nounwind
@@ -333,9 +333,9 @@ define void @llvm_mips_bnegi_b_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($1)
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnegi_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_bnegi_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_bnegi_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_bnegi_b_RES
   ret void
 }
 declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32) nounwind
@@ -356,9 +356,9 @@ define void @llvm_mips_bnegi_h_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($1)
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnegi_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_bnegi_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_bnegi_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_bnegi_h_RES
   ret void
 }
 declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32) nounwind
@@ -379,9 +379,9 @@ define void @llvm_mips_bnegi_w_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($1)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnegi_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_bnegi_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_bnegi_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_bnegi_w_RES
   ret void
 }
 declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32) nounwind
@@ -402,9 +402,9 @@ define void @llvm_mips_bnegi_d_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($1)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnegi_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_bnegi_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_bnegi_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_bnegi_d_RES
   ret void
 }
 declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32) nounwind
@@ -425,9 +425,9 @@ define void @llvm_mips_bseti_b_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($1)
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseti_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_bseti_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %0, i32 7)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_bseti_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_bseti_b_RES
   ret void
 }
 declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32) nounwind
@@ -448,9 +448,9 @@ define void @llvm_mips_bseti_h_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($1)
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bseti_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_bseti_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %0, i32 7)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_bseti_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_bseti_h_RES
   ret void
 }
 declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32) nounwind
@@ -471,9 +471,9 @@ define void @llvm_mips_bseti_w_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($1)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bseti_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_bseti_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %0, i32 7)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_bseti_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_bseti_w_RES
   ret void
 }
 declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32) nounwind
@@ -494,9 +494,9 @@ define void @llvm_mips_bseti_d_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($1)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bseti_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_bseti_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %0, i32 7)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_bseti_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_bseti_d_RES
   ret void
 }
 declare <2 x i64> @llvm.mips.bseti.d(<2 x i64>, i32) nounwind

diff --git a/llvm/test/CodeGen/Mips/msa/i5-c.ll b/llvm/test/CodeGen/Mips/msa/i5-c.ll
index b85cc5fdee3bf..96f5e6286276a 100644
--- a/llvm/test/CodeGen/Mips/msa/i5-c.ll
+++ b/llvm/test/CodeGen/Mips/msa/i5-c.ll
@@ -10,11 +10,11 @@
 
 define void @llvm_mips_ceqi_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceqi_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_ceqi_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_ceqi_b_RES1
+  store <16 x i8> %1, ptr @llvm_mips_ceqi_b_RES1
   %2 = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %0, i32 -14)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ceqi_b_RES2
+  store <16 x i8> %2, ptr @llvm_mips_ceqi_b_RES2
   ret void
 }
 
@@ -34,11 +34,11 @@ declare <16 x i8> @llvm.mips.ceqi.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_ceqi_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceqi_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_ceqi_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.ceqi.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_ceqi_h_RES1
+  store <8 x i16> %1, ptr @llvm_mips_ceqi_h_RES1
   %2 = tail call <8 x i16> @llvm.mips.ceqi.h(<8 x i16> %0, i32 -14)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ceqi_h_RES2
+  store <8 x i16> %2, ptr @llvm_mips_ceqi_h_RES2
   ret void
 }
 
@@ -58,11 +58,11 @@ declare <8 x i16> @llvm.mips.ceqi.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_ceqi_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceqi_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_ceqi_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.ceqi.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ceqi_w_RES1
+  store <4 x i32> %1, ptr @llvm_mips_ceqi_w_RES1
   %2 = tail call <4 x i32> @llvm.mips.ceqi.w(<4 x i32> %0, i32 -14)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ceqi_w_RES2
+  store <4 x i32> %2, ptr @llvm_mips_ceqi_w_RES2
   ret void
 }
 
@@ -82,11 +82,11 @@ declare <4 x i32> @llvm.mips.ceqi.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_ceqi_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceqi_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_ceqi_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.ceqi.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ceqi_d_RES1
+  store <2 x i64> %1, ptr @llvm_mips_ceqi_d_RES1
   %2 = tail call <2 x i64> @llvm.mips.ceqi.d(<2 x i64> %0, i32 -14)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ceqi_d_RES2
+  store <2 x i64> %2, ptr @llvm_mips_ceqi_d_RES2
   ret void
 }
 
@@ -106,11 +106,11 @@ declare <2 x i64> @llvm.mips.ceqi.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_clei_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_s_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_clei_s_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_s_b_RES1
+  store <16 x i8> %1, ptr @llvm_mips_clei_s_b_RES1
   %2 = tail call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %0, i32 -14)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_clei_s_b_RES2
+  store <16 x i8> %2, ptr @llvm_mips_clei_s_b_RES2
   ret void
 }
 
@@ -130,11 +130,11 @@ declare <16 x i8> @llvm.mips.clei.s.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_clei_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_s_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_clei_s_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_s_h_RES1
+  store <8 x i16> %1, ptr @llvm_mips_clei_s_h_RES1
   %2 = tail call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %0, i32 -14)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_clei_s_h_RES2
+  store <8 x i16> %2, ptr @llvm_mips_clei_s_h_RES2
   ret void
 }
 
@@ -154,11 +154,11 @@ declare <8 x i16> @llvm.mips.clei.s.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_clei_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_s_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_clei_s_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_s_w_RES1
+  store <4 x i32> %1, ptr @llvm_mips_clei_s_w_RES1
   %2 = tail call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %0, i32 -14)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_clei_s_w_RES2
+  store <4 x i32> %2, ptr @llvm_mips_clei_s_w_RES2
   ret void
 }
 
@@ -178,11 +178,11 @@ declare <4 x i32> @llvm.mips.clei.s.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_clei_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_s_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_clei_s_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_s_d_RES1
+  store <2 x i64> %1, ptr @llvm_mips_clei_s_d_RES1
   %2 = tail call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %0, i32 -14)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_clei_s_d_RES2
+  store <2 x i64> %2, ptr @llvm_mips_clei_s_d_RES2
   ret void
 }
 
@@ -201,9 +201,9 @@ declare <2 x i64> @llvm.mips.clei.s.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_clei_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_u_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_clei_u_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_u_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_clei_u_b_RES
   ret void
 }
 
@@ -220,9 +220,9 @@ declare <16 x i8> @llvm.mips.clei.u.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_clei_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_u_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_clei_u_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_u_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_clei_u_h_RES
   ret void
 }
 
@@ -239,9 +239,9 @@ declare <8 x i16> @llvm.mips.clei.u.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_clei_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_u_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_clei_u_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_u_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_clei_u_w_RES
   ret void
 }
 
@@ -258,9 +258,9 @@ declare <4 x i32> @llvm.mips.clei.u.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_clei_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_u_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_clei_u_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_u_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_clei_u_d_RES
   ret void
 }
 
@@ -278,11 +278,11 @@ declare <2 x i64> @llvm.mips.clei.u.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_clti_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_s_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_clti_s_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_s_b_RES1
+  store <16 x i8> %1, ptr @llvm_mips_clti_s_b_RES1
   %2 = tail call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %0, i32 -14)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_clti_s_b_RES2
+  store <16 x i8> %2, ptr @llvm_mips_clti_s_b_RES2
   ret void
 }
 
@@ -302,11 +302,11 @@ declare <16 x i8> @llvm.mips.clti.s.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_clti_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_s_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_clti_s_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_s_h_RES1
+  store <8 x i16> %1, ptr @llvm_mips_clti_s_h_RES1
   %2 = tail call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %0, i32 -14)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_clti_s_h_RES2
+  store <8 x i16> %2, ptr @llvm_mips_clti_s_h_RES2
   ret void
 }
 
@@ -326,11 +326,11 @@ declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_clti_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clti_s_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_clti_s_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_s_w_RES1
+  store <4 x i32> %1, ptr @llvm_mips_clti_s_w_RES1
   %2 = tail call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %0, i32 -14)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_clti_s_w_RES2
+  store <4 x i32> %2, ptr @llvm_mips_clti_s_w_RES2
   ret void
 }
 
@@ -350,11 +350,11 @@ declare <4 x i32> @llvm.mips.clti.s.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_clti_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_s_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_clti_s_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_s_d_RES1
+  store <2 x i64> %1, ptr @llvm_mips_clti_s_d_RES1
   %2 = tail call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %0, i32 -14)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_clti_s_d_RES2
+  store <2 x i64> %2, ptr @llvm_mips_clti_s_d_RES2
   ret void
 }
 
@@ -373,9 +373,9 @@ declare <2 x i64> @llvm.mips.clti.s.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_clti_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_u_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_clti_u_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_u_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_clti_u_b_RES
   ret void
 }
 
@@ -392,9 +392,9 @@ declare <16 x i8> @llvm.mips.clti.u.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_clti_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_u_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_clti_u_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_u_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_clti_u_h_RES
   ret void
 }
 
@@ -411,9 +411,9 @@ declare <8 x i16> @llvm.mips.clti.u.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_clti_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clti_u_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_clti_u_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_u_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_clti_u_w_RES
   ret void
 }
 
@@ -430,9 +430,9 @@ declare <4 x i32> @llvm.mips.clti.u.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_clti_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_u_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_clti_u_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_u_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_clti_u_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/i5-m.ll b/llvm/test/CodeGen/Mips/msa/i5-m.ll
index 9877136523cdd..74599185963a6 100644
--- a/llvm/test/CodeGen/Mips/msa/i5-m.ll
+++ b/llvm/test/CodeGen/Mips/msa/i5-m.ll
@@ -10,11 +10,11 @@
 
 define void @llvm_mips_maxi_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_s_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_maxi_s_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_s_b_RES1
+  store <16 x i8> %1, ptr @llvm_mips_maxi_s_b_RES1
   %2 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 -14)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_maxi_s_b_RES2
+  store <16 x i8> %2, ptr @llvm_mips_maxi_s_b_RES2
   ret void
 }
 
@@ -34,11 +34,11 @@ declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_maxi_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_s_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_maxi_s_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_s_h_RES1
+  store <8 x i16> %1, ptr @llvm_mips_maxi_s_h_RES1
   %2 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 -14)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_maxi_s_h_RES2
+  store <8 x i16> %2, ptr @llvm_mips_maxi_s_h_RES2
   ret void
 }
 
@@ -58,11 +58,11 @@ declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_maxi_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_s_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_maxi_s_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_s_w_RES1
+  store <4 x i32> %1, ptr @llvm_mips_maxi_s_w_RES1
   %2 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 -14)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_maxi_s_w_RES2
+  store <4 x i32> %2, ptr @llvm_mips_maxi_s_w_RES2
   ret void
 }
 
@@ -82,11 +82,11 @@ declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_maxi_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maxi_s_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_maxi_s_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_s_d_RES1
+  store <2 x i64> %1, ptr @llvm_mips_maxi_s_d_RES1
   %2 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 -14)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_maxi_s_d_RES2
+  store <2 x i64> %2, ptr @llvm_mips_maxi_s_d_RES2
   ret void
 }
 
@@ -105,9 +105,9 @@ declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_maxi_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_u_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_maxi_u_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_u_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_maxi_u_b_RES
   ret void
 }
 
@@ -124,9 +124,9 @@ declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_maxi_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_u_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_maxi_u_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_u_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_maxi_u_h_RES
   ret void
 }
 
@@ -143,9 +143,9 @@ declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_maxi_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_u_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_maxi_u_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_u_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_maxi_u_w_RES
   ret void
 }
 
@@ -162,9 +162,9 @@ declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_maxi_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maxi_u_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_maxi_u_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_u_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_maxi_u_d_RES
   ret void
 }
 
@@ -182,11 +182,11 @@ declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_mini_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_s_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_mini_s_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_s_b_RES1
+  store <16 x i8> %1, ptr @llvm_mips_mini_s_b_RES1
   %2 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 -14)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_mini_s_b_RES2
+  store <16 x i8> %2, ptr @llvm_mips_mini_s_b_RES2
   ret void
 }
 
@@ -206,11 +206,11 @@ declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_mini_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_s_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_mini_s_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_s_h_RES1
+  store <8 x i16> %1, ptr @llvm_mips_mini_s_h_RES1
   %2 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 -14)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_mini_s_h_RES2
+  store <8 x i16> %2, ptr @llvm_mips_mini_s_h_RES2
   ret void
 }
 
@@ -230,11 +230,11 @@ declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_mini_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_s_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_mini_s_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_s_w_RES1
+  store <4 x i32> %1, ptr @llvm_mips_mini_s_w_RES1
   %2 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 -14)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_mini_s_w_RES2
+  store <4 x i32> %2, ptr @llvm_mips_mini_s_w_RES2
   ret void
 }
 
@@ -254,11 +254,11 @@ declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_mini_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_s_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_mini_s_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_s_d_RES1
+  store <2 x i64> %1, ptr @llvm_mips_mini_s_d_RES1
   %2 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 -14)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_mini_s_d_RES2
+  store <2 x i64> %2, ptr @llvm_mips_mini_s_d_RES2
   ret void
 }
 
@@ -277,9 +277,9 @@ declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32) nounwind
 
 define void @llvm_mips_mini_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_u_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_mini_u_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_u_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_mini_u_b_RES
   ret void
 }
 
@@ -296,9 +296,9 @@ declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_mini_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_u_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_mini_u_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_u_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_mini_u_h_RES
   ret void
 }
 
@@ -315,9 +315,9 @@ declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_mini_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_u_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_mini_u_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_u_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_mini_u_w_RES
   ret void
 }
 
@@ -334,9 +334,9 @@ declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_mini_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_u_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_mini_u_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_u_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_mini_u_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/i5-s.ll b/llvm/test/CodeGen/Mips/msa/i5-s.ll
index 179320e0813c8..0ba71fa9acd3f 100644
--- a/llvm/test/CodeGen/Mips/msa/i5-s.ll
+++ b/llvm/test/CodeGen/Mips/msa/i5-s.ll
@@ -20,9 +20,9 @@ define void @llvm_mips_subvi_b_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.b $w0, 0($1)
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subvi_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_subvi_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 14)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_subvi_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_subvi_b_RES
   ret void
 }
 
@@ -43,9 +43,9 @@ define void @llvm_mips_subvi_h_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.h $w0, 0($1)
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subvi_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_subvi_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 14)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_subvi_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_subvi_h_RES
   ret void
 }
 
@@ -66,9 +66,9 @@ define void @llvm_mips_subvi_w_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.w $w0, 0($1)
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subvi_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_subvi_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 14)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_subvi_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_subvi_w_RES
   ret void
 }
 
@@ -89,9 +89,9 @@ define void @llvm_mips_subvi_d_test() nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    st.d $w0, 0($1)
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subvi_d_ARG1
+  %0 = load <2 x i64>, ptr @llvm_mips_subvi_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 14)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_subvi_d_RES
+  store <2 x i64> %1, ptr @llvm_mips_subvi_d_RES
   ret void
 }
 

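The i5_ld_st.ll changes below go one step further: the ld/st intrinsics
previously took an i8* operand, so each call site needed a bitcast from the
vector pointer type. With opaque pointers that bitcast is a no-op and is
dropped, the remaining SSA values are renumbered, and the intrinsic
declarations change from i8* to ptr. A minimal sketch of that rewrite, again
with a hypothetical global @g:

  ; before
  %0 = bitcast <16 x i8>* @g to i8*
  %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 16)

  ; after
  %0 = tail call <16 x i8> @llvm.mips.ld.b(ptr @g, i32 16)
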
diff --git a/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll b/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll
index e752e5a59681d..e55799cf17667 100644
--- a/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll
+++ b/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll
@@ -9,13 +9,12 @@
 
 define void @llvm_mips_ld_b_test() nounwind {
 entry:
-  %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
-  %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 16)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
+  %0 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 16)
+  store <16 x i8> %0, ptr @llvm_mips_ld_b_RES
   ret void
 }
 
-declare <16 x i8> @llvm.mips.ld.b(i8*, i32) nounwind
+declare <16 x i8> @llvm.mips.ld.b(ptr, i32) nounwind
 
 ; CHECK: llvm_mips_ld_b_test:
 ; CHECK: ld.b [[R1:\$w[0-9]+]], 16(
@@ -25,9 +24,8 @@ declare <16 x i8> @llvm.mips.ld.b(i8*, i32) nounwind
 
 define void @llvm_mips_ld_b_unaligned_test() nounwind {
 entry:
-  %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
-  %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 9)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
+  %0 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 9)
+  store <16 x i8> %0, ptr @llvm_mips_ld_b_RES
   ret void
 }
 
@@ -39,11 +37,10 @@ entry:
 
 define void @llvm_mips_ld_b_valid_range_tests() nounwind {
 entry:
-  %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
-  %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 -512)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
-  %2 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 511)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ld_b_RES
+  %0 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 -512)
+  store <16 x i8> %0, ptr @llvm_mips_ld_b_RES
+  %1 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 511)
+  store <16 x i8> %1, ptr @llvm_mips_ld_b_RES
   ret void
 }
 
@@ -57,11 +54,10 @@ entry:
 
 define void @llvm_mips_ld_b_invalid_range_tests() nounwind {
 entry:
-  %0 = bitcast <16 x i8>* @llvm_mips_ld_b_ARG to i8*
-  %1 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 -513)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_ld_b_RES
-  %2 = tail call <16 x i8> @llvm.mips.ld.b(i8* %0, i32 512)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_ld_b_RES
+  %0 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 -513)
+  store <16 x i8> %0, ptr @llvm_mips_ld_b_RES
+  %1 = tail call <16 x i8> @llvm.mips.ld.b(ptr @llvm_mips_ld_b_ARG, i32 512)
+  store <16 x i8> %1, ptr @llvm_mips_ld_b_RES
   ret void
 }
 
@@ -80,13 +76,12 @@ entry:
 
 define void @llvm_mips_ld_h_test() nounwind {
 entry:
-  %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
-  %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 16)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
+  %0 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 16)
+  store <8 x i16> %0, ptr @llvm_mips_ld_h_RES
   ret void
 }
 
-declare <8 x i16> @llvm.mips.ld.h(i8*, i32) nounwind
+declare <8 x i16> @llvm.mips.ld.h(ptr, i32) nounwind
 
 ; CHECK: llvm_mips_ld_h_test:
 ; CHECK: ld.h [[R1:\$w[0-9]+]], 16(
@@ -96,9 +91,8 @@ declare <8 x i16> @llvm.mips.ld.h(i8*, i32) nounwind
 
 define void @llvm_mips_ld_h_unaligned_test() nounwind {
 entry:
-  %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
-  %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 9)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
+  %0 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 9)
+  store <8 x i16> %0, ptr @llvm_mips_ld_h_RES
   ret void
 }
 
@@ -111,11 +105,10 @@ entry:
 
 define void @llvm_mips_ld_h_valid_range_tests() nounwind {
 entry:
-  %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
-  %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 -1024)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
-  %2 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 1022)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ld_h_RES
+  %0 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 -1024)
+  store <8 x i16> %0, ptr @llvm_mips_ld_h_RES
+  %1 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 1022)
+  store <8 x i16> %1, ptr @llvm_mips_ld_h_RES
   ret void
 }
 
@@ -129,11 +122,10 @@ entry:
 
 define void @llvm_mips_ld_h_invalid_range_tests() nounwind {
 entry:
-  %0 = bitcast <8 x i16>* @llvm_mips_ld_h_ARG to i8*
-  %1 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 -1026)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_ld_h_RES
-  %2 = tail call <8 x i16> @llvm.mips.ld.h(i8* %0, i32 1024)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_ld_h_RES
+  %0 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 -1026)
+  store <8 x i16> %0, ptr @llvm_mips_ld_h_RES
+  %1 = tail call <8 x i16> @llvm.mips.ld.h(ptr @llvm_mips_ld_h_ARG, i32 1024)
+  store <8 x i16> %1, ptr @llvm_mips_ld_h_RES
   ret void
 }
 
@@ -152,13 +144,12 @@ entry:
 
 define void @llvm_mips_ld_w_test() nounwind {
 entry:
-  %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
-  %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 16)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
+  %0 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 16)
+  store <4 x i32> %0, ptr @llvm_mips_ld_w_RES
   ret void
 }
 
-declare <4 x i32> @llvm.mips.ld.w(i8*, i32) nounwind
+declare <4 x i32> @llvm.mips.ld.w(ptr, i32) nounwind
 
 ; CHECK: llvm_mips_ld_w_test:
 ; CHECK: ld.w [[R1:\$w[0-9]+]], 16(
@@ -170,9 +161,8 @@ declare <4 x i32> @llvm.mips.ld.w(i8*, i32) nounwind
 
 define void @llvm_mips_ld_w_unaligned_test() nounwind {
 entry:
-  %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
-  %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 9)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
+  %0 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 9)
+  store <4 x i32> %0, ptr @llvm_mips_ld_w_RES
   ret void
 }
 
@@ -185,11 +175,10 @@ entry:
 
 define void @llvm_mips_ld_w_valid_range_tests() nounwind {
 entry:
-  %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
-  %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 -2048)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
-  %2 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 2044)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ld_w_RES
+  %0 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 -2048)
+  store <4 x i32> %0, ptr @llvm_mips_ld_w_RES
+  %1 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 2044)
+  store <4 x i32> %1, ptr @llvm_mips_ld_w_RES
   ret void
 }
 
@@ -203,11 +192,10 @@ entry:
 
 define void @llvm_mips_ld_w_invalid_range_tests() nounwind {
 entry:
-  %0 = bitcast <4 x i32>* @llvm_mips_ld_w_ARG to i8*
-  %1 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 -2052)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_ld_w_RES
-  %2 = tail call <4 x i32> @llvm.mips.ld.w(i8* %0, i32 2048)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_ld_w_RES
+  %0 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 -2052)
+  store <4 x i32> %0, ptr @llvm_mips_ld_w_RES
+  %1 = tail call <4 x i32> @llvm.mips.ld.w(ptr @llvm_mips_ld_w_ARG, i32 2048)
+  store <4 x i32> %1, ptr @llvm_mips_ld_w_RES
   ret void
 }
 
@@ -223,13 +211,12 @@ entry:
 
 define void @llvm_mips_ld_d_test() nounwind {
 entry:
-  %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
-  %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 16)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
+  %0 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 16)
+  store <2 x i64> %0, ptr @llvm_mips_ld_d_RES
   ret void
 }
 
-declare <2 x i64> @llvm.mips.ld.d(i8*, i32) nounwind
+declare <2 x i64> @llvm.mips.ld.d(ptr, i32) nounwind
 
 ; CHECK: llvm_mips_ld_d_test:
 ; CHECK: ld.d [[R1:\$w[0-9]+]], 16(
@@ -239,9 +226,8 @@ declare <2 x i64> @llvm.mips.ld.d(i8*, i32) nounwind
 
 define void @llvm_mips_ld_d_unaligned_test() nounwind {
 entry:
-  %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
-  %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 9)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
+  %0 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 9)
+  store <2 x i64> %0, ptr @llvm_mips_ld_d_RES
   ret void
 }
 
@@ -254,11 +240,10 @@ entry:
 
 define void @llvm_mips_ld_d_valid_range_tests() nounwind {
 entry:
-  %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
-  %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 -4096)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
-  %2 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 4088)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ld_d_RES
+  %0 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 -4096)
+  store <2 x i64> %0, ptr @llvm_mips_ld_d_RES
+  %1 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 4088)
+  store <2 x i64> %1, ptr @llvm_mips_ld_d_RES
   ret void
 }
 
@@ -272,11 +257,10 @@ entry:
 
 define void @llvm_mips_ld_d_invalid_range_tests() nounwind {
 entry:
-  %0 = bitcast <2 x i64>* @llvm_mips_ld_d_ARG to i8*
-  %1 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 -4104)
-  store <2 x i64> %1, <2 x i64>* @llvm_mips_ld_d_RES
-  %2 = tail call <2 x i64> @llvm.mips.ld.d(i8* %0, i32 4096)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_ld_d_RES
+  %0 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 -4104)
+  store <2 x i64> %0, ptr @llvm_mips_ld_d_RES
+  %1 = tail call <2 x i64> @llvm.mips.ld.d(ptr @llvm_mips_ld_d_ARG, i32 4096)
+  store <2 x i64> %1, ptr @llvm_mips_ld_d_RES
   ret void
 }
 
@@ -297,13 +281,12 @@ entry:
 
 define void @llvm_mips_st_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
-  %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
-  tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 16)
+  %0 = load <16 x i8>, ptr @llvm_mips_st_b_ARG
+  tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 16)
   ret void
 }
 
-declare void @llvm.mips.st.b(<16 x i8>, i8*, i32) nounwind
+declare void @llvm.mips.st.b(<16 x i8>, ptr, i32) nounwind
 
 ; CHECK: llvm_mips_st_b_test:
 ; CHECK: ld.b
@@ -313,9 +296,8 @@ declare void @llvm.mips.st.b(<16 x i8>, i8*, i32) nounwind
 
 define void @llvm_mips_st_b_unaligned_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
-  %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
-  tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 9)
+  %0 = load <16 x i8>, ptr @llvm_mips_st_b_ARG
+  tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 9)
   ret void
 }
 
@@ -327,10 +309,9 @@ entry:
 
 define void @llvm_mips_st_b_valid_range_tests() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
-  %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
-  tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 -512)
-  tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 511)
+  %0 = load <16 x i8>, ptr @llvm_mips_st_b_ARG
+  tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 -512)
+  tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 511)
   ret void
 }
 
@@ -343,10 +324,9 @@ entry:
 
 define void @llvm_mips_st_b_invalid_range_tests() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG
-  %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8*
-  tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 -513)
-  tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 512)
+  %0 = load <16 x i8>, ptr @llvm_mips_st_b_ARG
+  tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 -513)
+  tail call void @llvm.mips.st.b(<16 x i8> %0, ptr @llvm_mips_st_b_RES, i32 512)
   ret void
 }
 
@@ -364,13 +344,12 @@ entry:
 
 define void @llvm_mips_st_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
-  %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
-  tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 16)
+  %0 = load <8 x i16>, ptr @llvm_mips_st_h_ARG
+  tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 16)
   ret void
 }
 
-declare void @llvm.mips.st.h(<8 x i16>, i8*, i32) nounwind
+declare void @llvm.mips.st.h(<8 x i16>, ptr, i32) nounwind
 
 ; CHECK: llvm_mips_st_h_test:
 ; CHECK: ld.h
@@ -380,9 +359,8 @@ declare void @llvm.mips.st.h(<8 x i16>, i8*, i32) nounwind
 
 define void @llvm_mips_st_h_unaligned_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
-  %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
-  tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 9)
+  %0 = load <8 x i16>, ptr @llvm_mips_st_h_ARG
+  tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 9)
   ret void
 }
 
@@ -395,10 +373,9 @@ entry:
 
 define void @llvm_mips_st_h_valid_range_tests() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
-  %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
-  tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 -1024)
-  tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 1022)
+  %0 = load <8 x i16>, ptr @llvm_mips_st_h_ARG
+  tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 -1024)
+  tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 1022)
   ret void
 }
 
@@ -411,10 +388,9 @@ entry:
 
 define void @llvm_mips_st_h_invalid_range_tests() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG
-  %1 = bitcast <8 x i16>* @llvm_mips_st_h_RES to i8*
-  tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 -1026)
-  tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 1024)
+  %0 = load <8 x i16>, ptr @llvm_mips_st_h_ARG
+  tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 -1026)
+  tail call void @llvm.mips.st.h(<8 x i16> %0, ptr @llvm_mips_st_h_RES, i32 1024)
   ret void
 }
 
@@ -432,13 +408,12 @@ entry:
 
 define void @llvm_mips_st_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
-  %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
-  tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 16)
+  %0 = load <4 x i32>, ptr @llvm_mips_st_w_ARG
+  tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 16)
   ret void
 }
 
-declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind
+declare void @llvm.mips.st.w(<4 x i32>, ptr, i32) nounwind
 
 ; CHECK: llvm_mips_st_w_test:
 ; CHECK: ld.w
@@ -448,9 +423,8 @@ declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind
 
 define void @llvm_mips_st_w_unaligned_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
-  %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
-  tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 9)
+  %0 = load <4 x i32>, ptr @llvm_mips_st_w_ARG
+  tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 9)
   ret void
 }
 
@@ -463,10 +437,9 @@ entry:
 
 define void @llvm_mips_st_w_valid_range_tests() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
-  %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
-  tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 -2048)
-  tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 2044)
+  %0 = load <4 x i32>, ptr @llvm_mips_st_w_ARG
+  tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 -2048)
+  tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 2044)
   ret void
 }
 
@@ -479,10 +452,9 @@ entry:
 
 define void @llvm_mips_st_w_invalid_range_tests() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG
-  %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8*
-  tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 -2052)
-  tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 2048)
+  %0 = load <4 x i32>, ptr @llvm_mips_st_w_ARG
+  tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 -2052)
+  tail call void @llvm.mips.st.w(<4 x i32> %0, ptr @llvm_mips_st_w_RES, i32 2048)
   ret void
 }
 
@@ -500,13 +472,12 @@ entry:
 
 define void @llvm_mips_st_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
-  %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
-  tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 16)
+  %0 = load <2 x i64>, ptr @llvm_mips_st_d_ARG
+  tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 16)
   ret void
 }
 
-declare void @llvm.mips.st.d(<2 x i64>, i8*, i32) nounwind
+declare void @llvm.mips.st.d(<2 x i64>, ptr, i32) nounwind
 
 ; CHECK: llvm_mips_st_d_test:
 ; CHECK: ld.d
@@ -516,9 +487,8 @@ declare void @llvm.mips.st.d(<2 x i64>, i8*, i32) nounwind
 
 define void @llvm_mips_st_d_unaligned_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
-  %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
-  tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 9)
+  %0 = load <2 x i64>, ptr @llvm_mips_st_d_ARG
+  tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 9)
   ret void
 }
 
@@ -531,10 +501,9 @@ entry:
 
 define void @llvm_mips_st_d_valid_range_tests() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
-  %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
-  tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 -4096)
-  tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 4088)
+  %0 = load <2 x i64>, ptr @llvm_mips_st_d_ARG
+  tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 -4096)
+  tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 4088)
   ret void
 }
 
@@ -547,10 +516,9 @@ entry:
 
 define void @llvm_mips_st_d_invalid_range_tests() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG
-  %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8*
-  tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 -4104)
-  tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 4096)
+  %0 = load <2 x i64>, ptr @llvm_mips_st_d_ARG
+  tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 -4104)
+  tail call void @llvm.mips.st.d(<2 x i64> %0, ptr @llvm_mips_st_d_RES, i32 4096)
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/i8.ll b/llvm/test/CodeGen/Mips/msa/i8.ll
index 2cccdab791618..89f5725c17357 100644
--- a/llvm/test/CodeGen/Mips/msa/i8.ll
+++ b/llvm/test/CodeGen/Mips/msa/i8.ll
@@ -8,9 +8,9 @@
 
 define void @llvm_mips_andi_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_andi_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_andi_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.andi.b(<16 x i8> %0, i32 25)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_andi_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_andi_b_RES
   ret void
 }
 
@@ -28,10 +28,10 @@ declare <16 x i8> @llvm.mips.andi.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_bmnzi_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bmnzi_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_bmnzi_b_RES
   ret void
 }
 
@@ -52,10 +52,10 @@ declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32) nounwind
 
 define void @llvm_mips_bmzi_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bmzi_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bmzi_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 25)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_bmzi_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_bmzi_b_RES
   ret void
 }
 
@@ -77,10 +77,10 @@ declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32) nounwind
 
 define void @llvm_mips_bseli_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bseli_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bseli_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %0, <16 x i8> %1, i32 25)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_bseli_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_bseli_b_RES
   ret void
 }
 
@@ -100,9 +100,9 @@ declare <16 x i8> @llvm.mips.bseli.b(<16 x i8>, <16 x i8>, i32) nounwind
 
 define void @llvm_mips_nori_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nori_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_nori_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.nori.b(<16 x i8> %0, i32 25)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_nori_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_nori_b_RES
   ret void
 }
 
@@ -119,9 +119,9 @@ declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_ori_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ori_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_ori_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.ori.b(<16 x i8> %0, i32 25)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_ori_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_ori_b_RES
   ret void
 }
 
@@ -138,9 +138,9 @@ declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_shf_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_shf_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_shf_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.shf.b(<16 x i8> %0, i32 25)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_shf_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_shf_b_RES
   ret void
 }
 
@@ -157,9 +157,9 @@ declare <16 x i8> @llvm.mips.shf.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_shf_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_shf_h_ARG1
+  %0 = load <8 x i16>, ptr @llvm_mips_shf_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.shf.h(<8 x i16> %0, i32 25)
-  store <8 x i16> %1, <8 x i16>* @llvm_mips_shf_h_RES
+  store <8 x i16> %1, ptr @llvm_mips_shf_h_RES
   ret void
 }
 
@@ -176,9 +176,9 @@ declare <8 x i16> @llvm.mips.shf.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_shf_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_shf_w_ARG1
+  %0 = load <4 x i32>, ptr @llvm_mips_shf_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.shf.w(<4 x i32> %0, i32 25)
-  store <4 x i32> %1, <4 x i32>* @llvm_mips_shf_w_RES
+  store <4 x i32> %1, ptr @llvm_mips_shf_w_RES
   ret void
 }
 
@@ -195,9 +195,9 @@ declare <4 x i32> @llvm.mips.shf.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_xori_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xori_b_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_xori_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.xori.b(<16 x i8> %0, i32 25)
-  store <16 x i8> %1, <16 x i8>* @llvm_mips_xori_b_RES
+  store <16 x i8> %1, ptr @llvm_mips_xori_b_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/immediates-bad.ll b/llvm/test/CodeGen/Mips/msa/immediates-bad.ll
index cd0ef21d0022d..bbb76f3b8b6bf 100644
--- a/llvm/test/CodeGen/Mips/msa/immediates-bad.ll
+++ b/llvm/test/CodeGen/Mips/msa/immediates-bad.ll
@@ -4,1576 +4,1576 @@
 ; Test that the immediate intrinsics with out of range values trigger an error.
 
 
-define void @binsli_b(<16 x i8> * %ptr) {
+define void @binsli_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %a, i32 65)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 ; CHECK: LLVM ERROR: Immediate out of range
 
-define void @binsri_b(<16 x i8> * %ptr) {
+define void @binsri_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %a, i32 5)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bmnzi_b(<16 x i8> * %ptr) {
+define void @bmnzi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %a, i32 63)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bmzi_b(<16 x i8> * %ptr) {
+define void @bmzi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %a, i32 63)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bnegi_b(<16 x i8> * %ptr) {
+define void @bnegi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, i32 6)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseli_b(<16 x i8> * %ptr) {
+define void @bseli_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a, <16 x i8> %a, i32 63)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseti_b(<16 x i8> * %ptr) {
+define void @bseti_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 9)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_s_b(<16 x i8> * %ptr) {
+define void @clei_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 152)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_u_b(<16 x i8> * %ptr) {
+define void @clei_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 163)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_s_b(<16 x i8> * %ptr) {
+define void @clti_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 129)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_u_b(<16 x i8> * %ptr) {
+define void @clti_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 163)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ldi_b(<16 x i8> * %ptr) {
+define void @ldi_b(ptr %ptr) {
 entry:
   %r = call <16 x i8> @llvm.mips.ldi.b(i32 1025)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_s_b(<16 x i8> * %ptr) {
+define void @maxi_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 163)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_u_b(<16 x i8> * %ptr) {
+define void @maxi_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 163)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_s_b(<16 x i8> * %ptr) {
+define void @mini_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 163)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_u_b(<16 x i8> * %ptr) {
+define void @mini_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 163)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @nori_b(<16 x i8> * %ptr) {
+define void @nori_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 63)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ori_b(<16 x i8> * %ptr) {
+define void @ori_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.ori.b(<16 x i8> %a, i32 63)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @sldi_b(<16 x i8> * %ptr) {
+define void @sldi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 7)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @slli_b(<16 x i8> * %ptr) {
+define void @slli_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 65)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @splati_b(<16 x i8> * %ptr) {
+define void @splati_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 65)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srai_b(<16 x i8> * %ptr) {
+define void @srai_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 65)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srari_b(<16 x i8> * %ptr) {
+define void @srari_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 65)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srli_b(<16 x i8> * %ptr) {
+define void @srli_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 65)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srlri_b(<16 x i8> * %ptr) {
+define void @srlri_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 65)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @addvi_w(<4 x i32> * %ptr) {
+define void @addvi_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bclri_w(<4 x i32> * %ptr) {
+define void @bclri_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsli_w(<4 x i32> * %ptr) {
+define void @binsli_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsri_w(<4 x i32> * %ptr) {
+define void @binsri_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bnegi_w(<4 x i32> * %ptr) {
+define void @bnegi_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseti_w(<4 x i32> * %ptr) {
+define void @bseti_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_s_w(<4 x i32> * %ptr) {
+define void @clei_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_u_w(<4 x i32> * %ptr) {
+define void @clei_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_s_w(<4 x i32> * %ptr) {
+define void @clti_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_u_w(<4 x i32> * %ptr) {
+define void @clti_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_s_w(<4 x i32> * %ptr) {
+define void @maxi_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_u_w(<4 x i32> * %ptr) {
+define void @maxi_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_s_w(<4 x i32> * %ptr) {
+define void @mini_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_u_w(<4 x i32> * %ptr) {
+define void @mini_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ldi_w(<4 x i32> * %ptr) {
+define void @ldi_w(ptr %ptr) {
 entry:
   %r = call <4 x i32> @llvm.mips.ldi.w(i32 1024)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @sldi_w(<4 x i32> * %ptr) {
+define void @sldi_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 63)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @slli_w(<4 x i32> * %ptr) {
+define void @slli_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 65)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @splati_w(<4 x i32> * %ptr) {
+define void @splati_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 65)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srai_w(<4 x i32> * %ptr) {
+define void @srai_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 65)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srari_w(<4 x i32> * %ptr) {
+define void @srari_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 65)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srli_w(<4 x i32> * %ptr) {
+define void @srli_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 65)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srlri_w(<4 x i32> * %ptr) {
+define void @srlri_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 65)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @addvi_h(<8 x i16> * %ptr) {
+define void @addvi_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 65)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bclri_h(<8 x i16> * %ptr) {
+define void @bclri_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 16)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsli_h(<8 x i16> * %ptr) {
+define void @binsli_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %a, i32 17)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsri_h(<8 x i16> * %ptr) {
+define void @binsri_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %a, i32 19)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bnegi_h(<8 x i16> * %ptr) {
+define void @bnegi_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 19)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseti_h(<8 x i16> * %ptr) {
+define void @bseti_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 19)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_s_h(<8 x i16> * %ptr) {
+define void @clei_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 63)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_u_h(<8 x i16> * %ptr) {
+define void @clei_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 130)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_s_h(<8 x i16> * %ptr) {
+define void @clti_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 63)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_u_h(<8 x i16> * %ptr) {
+define void @clti_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 63)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_s_h(<8 x i16> * %ptr) {
+define void @maxi_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 63)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_u_h(<8 x i16> * %ptr) {
+define void @maxi_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 130)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_s_h(<8 x i16> * %ptr) {
+define void @mini_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 63)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_u_h(<8 x i16> * %ptr) {
+define void @mini_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 130)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ldi_h(<8 x i16> * %ptr) {
+define void @ldi_h(ptr %ptr) {
 entry:
   %r = call <8 x i16> @llvm.mips.ldi.h(i32 1024)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @sldi_h(<8 x i16> * %ptr) {
+define void @sldi_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 65)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @slli_h(<8 x i16> * %ptr) {
+define void @slli_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 65)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @splati_h(<8 x i16> * %ptr) {
+define void @splati_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 65)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srai_h(<8 x i16> * %ptr) {
+define void @srai_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 65)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srari_h(<8 x i16> * %ptr) {
+define void @srari_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 65)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srli_h(<8 x i16> * %ptr) {
+define void @srli_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 65)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srlri_h(<8 x i16> * %ptr) {
+define void @srlri_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 65)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define i32 @copy_s_b(<16 x i8> * %ptr) {
+define i32 @copy_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 17)
   ret i32 %r
 }
 
 
-define i32 @copy_s_h(<8 x i16> * %ptr) {
+define i32 @copy_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 9)
   ret i32 %r
 }
 
 
-define i32 @copy_s_w(<4 x i32> * %ptr) {
+define i32 @copy_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 5)
   ret i32 %r
 }
 
 
-define i32 @copy_u_b(<16 x i8> * %ptr) {
+define i32 @copy_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 16)
   ret i32 %r
 }
 
 
-define i32 @copy_u_h(<8 x i16> * %ptr) {
+define i32 @copy_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 9)
   ret i32 %r
 }
 
 
-define i32 @copy_u_w(<4 x i32> * %ptr) {
+define i32 @copy_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 5)
   ret i32 %r
 }
 
-define i64 @copy_s_d(<2 x i64> * %ptr) {
-entry:  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+define i64 @copy_s_d(ptr %ptr) {
+entry:
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 3)
   ret i64 %r
 }
 
-define i64 @copy_u_d(<2 x i64> * %ptr) {
-entry:  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+define i64 @copy_u_d(ptr %ptr) {
+entry:
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 3)
   ret i64 %r
 }
 
-define void @addvi_d(<2 x i64> * %ptr) {
+define void @addvi_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bclri_d(<2 x i64> * %ptr) {
+define void @bclri_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 64)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsli_d(<2 x i64> * %ptr) {
+define void @binsli_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsri_d(<2 x i64> * %ptr) {
+define void @binsri_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bnegi_d(<2 x i64> * %ptr) {
+define void @bnegi_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseti_d(<2 x i64> * %ptr) {
+define void @bseti_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_s_d(<2 x i64> * %ptr) {
+define void @clei_s_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 63)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_u_d(<2 x i64> * %ptr) {
+define void @clei_u_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 63)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_s_d(<2 x i64> * %ptr) {
+define void @clti_s_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 63)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_u_d(<2 x i64> * %ptr) {
+define void @clti_u_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 63)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ldi_d(<2 x i64> * %ptr) {
+define void @ldi_d(ptr %ptr) {
 entry:
   %r = call <2 x i64> @llvm.mips.ldi.d(i32 1024)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_s_d(<2 x i64> * %ptr) {
+define void @maxi_s_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 63)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_u_d(<2 x i64> * %ptr) {
+define void @maxi_u_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 63)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_s_d(<2 x i64> * %ptr) {
+define void @mini_s_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 63)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_u_d(<2 x i64> * %ptr) {
+define void @mini_u_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 63)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @sldi_d(<2 x i64> * %ptr) {
+define void @sldi_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 1)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @slli_d(<2 x i64> * %ptr) {
+define void @slli_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srai_d(<2 x i64> * %ptr) {
+define void @srai_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srari_d(<2 x i64> * %ptr) {
+define void @srari_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srli_d(<2 x i64> * %ptr) {
+define void @srli_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srlri_d(<2 x i64> * %ptr) {
+define void @srlri_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 65)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }

 ; Negative numbers
 
 
-define void @neg_addvi_b(<16 x i8> * %ptr) {
+define void @neg_addvi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %a, i32 -25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_andi_b(<16 x i8> * %ptr) {
+define void @neg_andi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.andi.b(<16 x i8> %a, i32 -25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bclri_b(<16 x i8> * %ptr) {
+define void @neg_bclri_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %a, i32 -3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_binsli_b(<16 x i8> * %ptr) {
+define void @neg_binsli_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %a, i32 -3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_binsri_b(<16 x i8> * %ptr) {
+define void @neg_binsri_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %a, i32 5)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bmnzi_b(<16 x i8> * %ptr) {
+define void @neg_bmnzi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %a, i32 -25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bmzi_b(<16 x i8> * %ptr) {
+define void @neg_bmzi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %a, i32 -25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bnegi_b(<16 x i8> * %ptr) {
+define void @neg_bnegi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, i32 6)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bseli_b(<16 x i8> * %ptr) {
+define void @neg_bseli_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a, <16 x i8> %a, i32 -25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bseti_b(<16 x i8> * %ptr) {
+define void @neg_bseti_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 -5)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clei_s_b(<16 x i8> * %ptr) {
+define void @neg_clei_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 -120)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clei_u_b(<16 x i8> * %ptr) {
+define void @neg_clei_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 -25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clti_s_b(<16 x i8> * %ptr) {
+define void @neg_clti_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 -35)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clti_u_b(<16 x i8> * %ptr) {
+define void @neg_clti_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 -25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_ldi_b(<16 x i8> * %ptr) {
+define void @neg_ldi_b(ptr %ptr) {
 entry:
   %r = call <16 x i8> @llvm.mips.ldi.b(i32 -3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_maxi_s_b(<16 x i8> * %ptr) {
+define void @neg_maxi_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 2)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_maxi_u_b(<16 x i8> * %ptr) {
+define void @neg_maxi_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 2)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_mini_s_b(<16 x i8> * %ptr) {
+define void @neg_mini_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 2)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_mini_u_b(<16 x i8> * %ptr) {
+define void @neg_mini_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 2)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_nori_b(<16 x i8> * %ptr) {
+define void @neg_nori_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 -25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_ori_b(<16 x i8> * %ptr) {
+define void @neg_ori_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.ori.b(<16 x i8> %a, i32 -25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_sldi_b(<16 x i8> * %ptr) {
+define void @neg_sldi_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 -7)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_slli_b(<16 x i8> * %ptr) {
+define void @neg_slli_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 -3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_splati_b(<16 x i8> * %ptr) {
+define void @neg_splati_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 -3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srai_b(<16 x i8> * %ptr) {
+define void @neg_srai_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 -3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srari_b(<16 x i8> * %ptr) {
+define void @neg_srari_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 -3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srli_b(<16 x i8> * %ptr) {
+define void @neg_srli_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 -3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srlri_b(<16 x i8> * %ptr) {
+define void @neg_srlri_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 -3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_addvi_w(<4 x i32> * %ptr) {
+define void @neg_addvi_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 -25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bclri_w(<4 x i32> * %ptr) {
+define void @neg_bclri_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 -25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_binsli_w(<4 x i32> * %ptr) {
+define void @neg_binsli_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %a, i32 -25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_binsri_w(<4 x i32> * %ptr) {
+define void @neg_binsri_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %a, i32 -25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bnegi_w(<4 x i32> * %ptr) {
+define void @neg_bnegi_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 -25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bseti_w(<4 x i32> * %ptr) {
+define void @neg_bseti_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 -25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clei_s_w(<4 x i32> * %ptr) {
+define void @neg_clei_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 -140)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clei_u_w(<4 x i32> * %ptr) {
+define void @neg_clei_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 -25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clti_s_w(<4 x i32> * %ptr) {
+define void @neg_clti_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 -150)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clti_u_w(<4 x i32> * %ptr) {
+define void @neg_clti_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 -25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_maxi_s_w(<4 x i32> * %ptr) {
+define void @neg_maxi_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 -200)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_maxi_u_w(<4 x i32> * %ptr) {
+define void @neg_maxi_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 -200)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_mini_s_w(<4 x i32> * %ptr) {
+define void @neg_mini_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 -200)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_mini_u_w(<4 x i32> * %ptr) {
+define void @neg_mini_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 -200)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_ldi_w(<4 x i32> * %ptr) {
+define void @neg_ldi_w(ptr %ptr) {
 entry:
   %r = call <4 x i32> @llvm.mips.ldi.w(i32 -300)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_sldi_w(<4 x i32> * %ptr) {
+define void @neg_sldi_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 -20)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_slli_w(<4 x i32> * %ptr) {
+define void @neg_slli_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 -3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_splati_w(<4 x i32> * %ptr) {
+define void @neg_splati_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 -3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srai_w(<4 x i32> * %ptr) {
+define void @neg_srai_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 -3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srari_w(<4 x i32> * %ptr) {
+define void @neg_srari_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 -3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srli_w(<4 x i32> * %ptr) {
+define void @neg_srli_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 -3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srlri_w(<4 x i32> * %ptr) {
+define void @neg_srlri_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 -3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_addvi_h(<8 x i16> * %ptr) {
+define void @neg_addvi_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 -25)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bclri_h(<8 x i16> * %ptr) {
+define void @neg_bclri_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 -8)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_binsli_h(<8 x i16> * %ptr) {
+define void @neg_binsli_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %a, i32 -8)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_binsri_h(<8 x i16> * %ptr) {
+define void @neg_binsri_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %a, i32 -15)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bnegi_h(<8 x i16> * %ptr) {
+define void @neg_bnegi_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 -14)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bseti_h(<8 x i16> * %ptr) {
+define void @neg_bseti_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 -15)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clei_s_h(<8 x i16> * %ptr) {
+define void @neg_clei_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 -25)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clei_u_h(<8 x i16> * %ptr) {
+define void @neg_clei_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 -25)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clti_s_h(<8 x i16> * %ptr) {
+define void @neg_clti_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 -150)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clti_u_h(<8 x i16> * %ptr) {
+define void @neg_clti_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 -25)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_maxi_s_h(<8 x i16> * %ptr) {
+define void @neg_maxi_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 -200)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_maxi_u_h(<8 x i16> * %ptr) {
+define void @neg_maxi_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 -200)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_mini_s_h(<8 x i16> * %ptr) {
+define void @neg_mini_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 -200)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_mini_u_h(<8 x i16> * %ptr) {
+define void @neg_mini_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 -2)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_ldi_h(<8 x i16> * %ptr) {
+define void @neg_ldi_h(ptr %ptr) {
 entry:
   %r = call <8 x i16> @llvm.mips.ldi.h(i32 -300)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_sldi_h(<8 x i16> * %ptr) {
+define void @neg_sldi_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 -3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_slli_h(<8 x i16> * %ptr) {
+define void @neg_slli_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 -3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_splati_h(<8 x i16> * %ptr) {
+define void @neg_splati_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 -3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srai_h(<8 x i16> * %ptr) {
+define void @neg_srai_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 -3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srari_h(<8 x i16> * %ptr) {
+define void @neg_srari_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 -3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srli_h(<8 x i16> * %ptr) {
+define void @neg_srli_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 -3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srlri_h(<8 x i16> * %ptr) {
+define void @neg_srlri_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 -3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define i32 @neg_copy_s_b(<16 x i8> * %ptr) {
+define i32 @neg_copy_s_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 -1)
   ret i32 %r
 }
 
-define i32 @neg_copy_s_h(<8 x i16> * %ptr) {
+define i32 @neg_copy_s_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 -1)
   ret i32 %r
 }
 
-define i32 @neg_copy_s_w(<4 x i32> * %ptr) {
+define i32 @neg_copy_s_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 -1)
   ret i32 %r
 }
 
-define i32 @neg_copy_u_b(<16 x i8> * %ptr) {
+define i32 @neg_copy_u_b(ptr %ptr) {
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 -1)
   ret i32 %r
 }
 
 
-define i32 @neg_copy_u_h(<8 x i16> * %ptr) {
+define i32 @neg_copy_u_h(ptr %ptr) {
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 -1)
   ret i32 %r
 }
 
 
-define i32 @neg_copy_u_w(<4 x i32> * %ptr) {
+define i32 @neg_copy_u_w(ptr %ptr) {
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 -1)
   ret i32 %r
 }
 
-define i64 @neg_copy_s_d(<2 x i64> * %ptr) {
-entry:  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+define i64 @neg_copy_s_d(ptr %ptr) {
+entry:
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 -1)
   ret i64 %r
 }
 
-define i64 @neg_copy_u_d(<2 x i64> * %ptr) {
-entry:  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+define i64 @neg_copy_u_d(ptr %ptr) {
+entry:
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 -1)
   ret i64 %r
 }
 
-define void @neg_addvi_d(<2 x i64> * %ptr) {
+define void @neg_addvi_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 -25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bclri_d(<2 x i64> * %ptr) {
+define void @neg_bclri_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 -25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_binsli_d(<2 x i64> * %ptr) {
+define void @neg_binsli_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %a, i32 -25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_binsri_d(<2 x i64> * %ptr) {
+define void @neg_binsri_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %a, i32 -25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bnegi_d(<2 x i64> * %ptr) {
+define void @neg_bnegi_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 -25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_bseti_d(<2 x i64> * %ptr) {
+define void @neg_bseti_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 -25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clei_s_d(<2 x i64> * %ptr) {
+define void @neg_clei_s_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 -45)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clei_u_d(<2 x i64> * %ptr) {
+define void @neg_clei_u_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 -25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clti_s_d(<2 x i64> * %ptr) {
+define void @neg_clti_s_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 -32)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_clti_u_d(<2 x i64> * %ptr) {
+define void @neg_clti_u_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 -25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_ldi_d(<2 x i64> * %ptr) {
+define void @neg_ldi_d(ptr %ptr) {
 entry:
   %r = call <2 x i64> @llvm.mips.ldi.d(i32 -3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_maxi_s_d(<2 x i64> * %ptr) {
+define void @neg_maxi_s_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 -202)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_maxi_u_d(<2 x i64> * %ptr) {
+define void @neg_maxi_u_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 -2)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_mini_s_d(<2 x i64> * %ptr) {
+define void @neg_mini_s_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 -202)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_mini_u_d(<2 x i64> * %ptr) {
+define void @neg_mini_u_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 -2)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_sldi_d(<2 x i64> * %ptr) {
+define void @neg_sldi_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 -1)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_slli_d(<2 x i64> * %ptr) {
+define void @neg_slli_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 -3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srai_d(<2 x i64> * %ptr) {
+define void @neg_srai_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 -3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srari_d(<2 x i64> * %ptr) {
+define void @neg_srari_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 -3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srli_d(<2 x i64> * %ptr) {
+define void @neg_srli_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 -3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @neg_srlri_d(<2 x i64> * %ptr) {
+define void @neg_srlri_d(ptr %ptr) {
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 -3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/immediates.ll b/llvm/test/CodeGen/Mips/msa/immediates.ll
index db1eb17cf4a10..5808643aded1c 100644
--- a/llvm/test/CodeGen/Mips/msa/immediates.ll
+++ b/llvm/test/CodeGen/Mips/msa/immediates.ll
@@ -10,7 +10,7 @@
 
 ; Some of the intrinsics lower to equivalent forms.
 
-define void @addvi_b(<16 x i8> * %ptr) {
+define void @addvi_b(ptr %ptr) {
 ; MSA-LABEL: addvi_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -26,13 +26,13 @@ define void @addvi_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %a, i32 25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @andi_b(<16 x i8> * %ptr) {
+define void @andi_b(ptr %ptr) {
 ; MSA-LABEL: andi_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -48,13 +48,13 @@ define void @andi_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.andi.b(<16 x i8> %a, i32 25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bclri_b(<16 x i8> * %ptr) {
+define void @bclri_b(ptr %ptr) {
 ; MSA-LABEL: bclri_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -70,13 +70,13 @@ define void @bclri_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %a, i32 3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsli_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
+define void @binsli_b(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: binsli_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($5)
@@ -95,14 +95,14 @@ define void @binsli_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w1, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
-  %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
+  %b = load <16 x i8>, ptr %ptr2, align 16
   %r = call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %a, <16 x i8> %b, i32 3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsri_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
+define void @binsri_b(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: binsri_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($5)
@@ -121,14 +121,14 @@ define void @binsri_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w1, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
-  %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
+  %b = load <16 x i8>, ptr %ptr2, align 16
   %r = call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %a, <16 x i8> %b, i32 5)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bmnzi_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
+define void @bmnzi_b(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: bmnzi_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($5)
@@ -147,14 +147,14 @@ define void @bmnzi_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w1, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
-  %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
+  %b = load <16 x i8>, ptr %ptr2, align 16
   %r = call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %a, <16 x i8> %b, i32 25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bmzi_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
+define void @bmzi_b(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: bmzi_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -173,14 +173,14 @@ define void @bmzi_b(<16 x i8> * %ptr, <16 x i8> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w1, 0($2)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
-  %b = load <16 x i8>, <16 x i8> * %ptr2, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
+  %b = load <16 x i8>, ptr %ptr2, align 16
   %r = call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %a, <16 x i8> %b, i32 25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bnegi_b(<16 x i8> * %ptr) {
+define void @bnegi_b(ptr %ptr) {
 ; MSA-LABEL: bnegi_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -196,13 +196,13 @@ define void @bnegi_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %a, i32 6)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseli_b(<16 x i8> * %ptr) {
+define void @bseli_b(ptr %ptr) {
 ; MSA-LABEL: bseli_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -218,13 +218,13 @@ define void @bseli_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %a, <16 x i8> %a, i32 25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseti_b(<16 x i8> * %ptr) {
+define void @bseti_b(ptr %ptr) {
 ; MSA-LABEL: bseti_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -240,13 +240,13 @@ define void @bseti_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %a, i32 5)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_s_b(<16 x i8> * %ptr) {
+define void @clei_s_b(ptr %ptr) {
 ; MSA-LABEL: clei_s_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -262,13 +262,13 @@ define void @clei_s_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %a, i32 12)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_u_b(<16 x i8> * %ptr) {
+define void @clei_u_b(ptr %ptr) {
 ; MSA-LABEL: clei_u_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -284,13 +284,13 @@ define void @clei_u_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %a, i32 25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_s_b(<16 x i8> * %ptr) {
+define void @clti_s_b(ptr %ptr) {
 ; MSA-LABEL: clti_s_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -306,13 +306,13 @@ define void @clti_s_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %a, i32 15)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_u_b(<16 x i8> * %ptr) {
+define void @clti_u_b(ptr %ptr) {
 ; MSA-LABEL: clti_u_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -328,13 +328,13 @@ define void @clti_u_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %a, i32 25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ldi_b(<16 x i8> * %ptr) {
+define void @ldi_b(ptr %ptr) {
 ; MSA-LABEL: ldi_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ldi.b $w0, 3
@@ -349,11 +349,11 @@ define void @ldi_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
   %r = call <16 x i8> @llvm.mips.ldi.b(i32 3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_s_b(<16 x i8> * %ptr) {
+define void @maxi_s_b(ptr %ptr) {
 ; MSA-LABEL: maxi_s_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -369,13 +369,13 @@ define void @maxi_s_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %a, i32 2)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_u_b(<16 x i8> * %ptr) {
+define void @maxi_u_b(ptr %ptr) {
 ; MSA-LABEL: maxi_u_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -391,13 +391,13 @@ define void @maxi_u_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %a, i32 2)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_s_b(<16 x i8> * %ptr) {
+define void @mini_s_b(ptr %ptr) {
 ; MSA-LABEL: mini_s_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -413,13 +413,13 @@ define void @mini_s_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %a, i32 2)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_u_b(<16 x i8> * %ptr) {
+define void @mini_u_b(ptr %ptr) {
 ; MSA-LABEL: mini_u_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -435,13 +435,13 @@ define void @mini_u_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %a, i32 2)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @nori_b(<16 x i8> * %ptr) {
+define void @nori_b(ptr %ptr) {
 ; MSA-LABEL: nori_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -457,13 +457,13 @@ define void @nori_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.nori.b(<16 x i8> %a, i32 25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ori_b(<16 x i8> * %ptr) {
+define void @ori_b(ptr %ptr) {
 ; MSA-LABEL: ori_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -479,13 +479,13 @@ define void @ori_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.ori.b(<16 x i8> %a, i32 25)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @sldi_b(<16 x i8> * %ptr) {
+define void @sldi_b(ptr %ptr) {
 ; MSA-LABEL: sldi_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -501,13 +501,13 @@ define void @sldi_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %a, <16 x i8> %a, i32 7)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @slli_b(<16 x i8> * %ptr) {
+define void @slli_b(ptr %ptr) {
 ; MSA-LABEL: slli_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -523,13 +523,13 @@ define void @slli_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.slli.b(<16 x i8> %a, i32 3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @splati_b(<16 x i8> * %ptr) {
+define void @splati_b(ptr %ptr) {
 ; MSA-LABEL: splati_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -545,13 +545,13 @@ define void @splati_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.splati.b(<16 x i8> %a, i32 3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srai_b(<16 x i8> * %ptr) {
+define void @srai_b(ptr %ptr) {
 ; MSA-LABEL: srai_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -567,13 +567,13 @@ define void @srai_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srai.b(<16 x i8> %a, i32 3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srari_b(<16 x i8> * %ptr) {
+define void @srari_b(ptr %ptr) {
 ; MSA-LABEL: srari_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -589,13 +589,13 @@ define void @srari_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srari.b(<16 x i8> %a, i32 3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srli_b(<16 x i8> * %ptr) {
+define void @srli_b(ptr %ptr) {
 ; MSA-LABEL: srli_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -611,13 +611,13 @@ define void @srli_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srli.b(<16 x i8> %a, i32 3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srlri_b(<16 x i8> * %ptr) {
+define void @srlri_b(ptr %ptr) {
 ; MSA-LABEL: srlri_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -633,13 +633,13 @@ define void @srlri_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.b $w0, 0($1)
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %a, i32 3)
-  store <16 x i8> %r, <16 x i8> * %ptr, align 16
+  store <16 x i8> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @addvi_w(<4 x i32> * %ptr) {
+define void @addvi_w(ptr %ptr) {
 ; MSA-LABEL: addvi_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -655,13 +655,13 @@ define void @addvi_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %a, i32 25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bclri_w(<4 x i32> * %ptr) {
+define void @bclri_w(ptr %ptr) {
 ; MSA-LABEL: bclri_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -677,13 +677,13 @@ define void @bclri_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %a, i32 25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsli_w(<4 x i32> * %ptr, <4 x i32> * %ptr2) {
+define void @binsli_w(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: binsli_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($5)
@@ -702,14 +702,14 @@ define void @binsli_w(<4 x i32> * %ptr, <4 x i32> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w1, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
-  %b = load <4 x i32>, <4 x i32> * %ptr2, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
+  %b = load <4 x i32>, ptr %ptr2, align 16
   %r = call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %a, <4 x i32> %b, i32 25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsri_w(<4 x i32> * %ptr, <4 x i32> * %ptr2) {
+define void @binsri_w(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: binsri_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($5)
@@ -728,14 +728,14 @@ define void @binsri_w(<4 x i32> * %ptr, <4 x i32> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w1, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
-  %b = load <4 x i32>, <4 x i32> * %ptr2, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
+  %b = load <4 x i32>, ptr %ptr2, align 16
   %r = call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %a, <4 x i32> %b, i32 25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bnegi_w(<4 x i32> * %ptr) {
+define void @bnegi_w(ptr %ptr) {
 ; MSA-LABEL: bnegi_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -751,13 +751,13 @@ define void @bnegi_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %a, i32 25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseti_w(<4 x i32> * %ptr) {
+define void @bseti_w(ptr %ptr) {
 ; MSA-LABEL: bseti_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -773,13 +773,13 @@ define void @bseti_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %a, i32 25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_s_w(<4 x i32> * %ptr) {
+define void @clei_s_w(ptr %ptr) {
 ; MSA-LABEL: clei_s_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -795,13 +795,13 @@ define void @clei_s_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %a, i32 14)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_u_w(<4 x i32> * %ptr) {
+define void @clei_u_w(ptr %ptr) {
 ; MSA-LABEL: clei_u_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -817,13 +817,13 @@ define void @clei_u_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %a, i32 25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_s_w(<4 x i32> * %ptr) {
+define void @clti_s_w(ptr %ptr) {
 ; MSA-LABEL: clti_s_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -839,13 +839,13 @@ define void @clti_s_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %a, i32 15)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_u_w(<4 x i32> * %ptr) {
+define void @clti_u_w(ptr %ptr) {
 ; MSA-LABEL: clti_u_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -861,13 +861,13 @@ define void @clti_u_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %a, i32 25)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_s_w(<4 x i32> * %ptr) {
+define void @maxi_s_w(ptr %ptr) {
 ; MSA-LABEL: maxi_s_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -883,13 +883,13 @@ define void @maxi_s_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %a, i32 2)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_u_w(<4 x i32> * %ptr) {
+define void @maxi_u_w(ptr %ptr) {
 ; MSA-LABEL: maxi_u_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -905,13 +905,13 @@ define void @maxi_u_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %a, i32 2)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_s_w(<4 x i32> * %ptr) {
+define void @mini_s_w(ptr %ptr) {
 ; MSA-LABEL: mini_s_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -927,13 +927,13 @@ define void @mini_s_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %a, i32 2)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_u_w(<4 x i32> * %ptr) {
+define void @mini_u_w(ptr %ptr) {
 ; MSA-LABEL: mini_u_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -949,13 +949,13 @@ define void @mini_u_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %a, i32 2)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ldi_w(<4 x i32> * %ptr) {
+define void @ldi_w(ptr %ptr) {
 ; MSA-LABEL: ldi_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ldi.w $w0, 3
@@ -970,11 +970,11 @@ define void @ldi_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
   %r = call <4 x i32> @llvm.mips.ldi.w(i32 3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @sldi_w(<4 x i32> * %ptr) {
+define void @sldi_w(ptr %ptr) {
 ; MSA-LABEL: sldi_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -990,13 +990,13 @@ define void @sldi_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %a, <4 x i32> %a, i32 2)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @slli_w(<4 x i32> * %ptr) {
+define void @slli_w(ptr %ptr) {
 ; MSA-LABEL: slli_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -1012,13 +1012,13 @@ define void @slli_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.slli.w(<4 x i32> %a, i32 3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @splati_w(<4 x i32> * %ptr) {
+define void @splati_w(ptr %ptr) {
 ; MSA-LABEL: splati_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -1034,13 +1034,13 @@ define void @splati_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.splati.w(<4 x i32> %a, i32 3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srai_w(<4 x i32> * %ptr) {
+define void @srai_w(ptr %ptr) {
 ; MSA-LABEL: srai_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -1056,13 +1056,13 @@ define void @srai_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srai.w(<4 x i32> %a, i32 3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srari_w(<4 x i32> * %ptr) {
+define void @srari_w(ptr %ptr) {
 ; MSA-LABEL: srari_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -1078,13 +1078,13 @@ define void @srari_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srari.w(<4 x i32> %a, i32 3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srli_w(<4 x i32> * %ptr) {
+define void @srli_w(ptr %ptr) {
 ; MSA-LABEL: srli_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -1100,13 +1100,13 @@ define void @srli_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srli.w(<4 x i32> %a, i32 3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srlri_w(<4 x i32> * %ptr) {
+define void @srlri_w(ptr %ptr) {
 ; MSA-LABEL: srlri_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -1122,13 +1122,13 @@ define void @srlri_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.w $w0, 0($1)
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %a, i32 3)
-  store <4 x i32> %r, <4 x i32> * %ptr, align 16
+  store <4 x i32> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @addvi_h(<8 x i16> * %ptr) {
+define void @addvi_h(ptr %ptr) {
 ; MSA-LABEL: addvi_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1144,13 +1144,13 @@ define void @addvi_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %a, i32 25)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bclri_h(<8 x i16> * %ptr) {
+define void @bclri_h(ptr %ptr) {
 ; MSA-LABEL: bclri_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1166,13 +1166,13 @@ define void @bclri_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %a, i32 8)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsli_h(<8 x i16> * %ptr, <8 x i16> * %ptr2) {
+define void @binsli_h(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: binsli_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($5)
@@ -1191,14 +1191,14 @@ define void @binsli_h(<8 x i16> * %ptr, <8 x i16> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w1, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
-  %b = load <8 x i16>, <8 x i16> * %ptr2, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
+  %b = load <8 x i16>, ptr %ptr2, align 16
   %r = call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %a, <8 x i16> %b, i32 8)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsri_h(<8 x i16> * %ptr, <8 x i16> * %ptr2) {
+define void @binsri_h(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: binsri_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($5)
@@ -1217,14 +1217,14 @@ define void @binsri_h(<8 x i16> * %ptr, <8 x i16> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w1, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
-  %b = load <8 x i16>, <8 x i16> * %ptr2, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
+  %b = load <8 x i16>, ptr %ptr2, align 16
   %r = call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %a, <8 x i16> %b, i32 14)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bnegi_h(<8 x i16> * %ptr) {
+define void @bnegi_h(ptr %ptr) {
 ; MSA-LABEL: bnegi_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1240,13 +1240,13 @@ define void @bnegi_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %a, i32 14)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseti_h(<8 x i16> * %ptr) {
+define void @bseti_h(ptr %ptr) {
 ; MSA-LABEL: bseti_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1262,13 +1262,13 @@ define void @bseti_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %a, i32 15)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_s_h(<8 x i16> * %ptr) {
+define void @clei_s_h(ptr %ptr) {
 ; MSA-LABEL: clei_s_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1284,13 +1284,13 @@ define void @clei_s_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %a, i32 13)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_u_h(<8 x i16> * %ptr) {
+define void @clei_u_h(ptr %ptr) {
 ; MSA-LABEL: clei_u_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1306,13 +1306,13 @@ define void @clei_u_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %a, i32 25)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_s_h(<8 x i16> * %ptr) {
+define void @clti_s_h(ptr %ptr) {
 ; MSA-LABEL: clti_s_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1328,13 +1328,13 @@ define void @clti_s_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %a, i32 15)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_u_h(<8 x i16> * %ptr) {
+define void @clti_u_h(ptr %ptr) {
 ; MSA-LABEL: clti_u_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1350,13 +1350,13 @@ define void @clti_u_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %a, i32 25)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_s_h(<8 x i16> * %ptr) {
+define void @maxi_s_h(ptr %ptr) {
 ; MSA-LABEL: maxi_s_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1372,13 +1372,13 @@ define void @maxi_s_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %a, i32 2)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_u_h(<8 x i16> * %ptr) {
+define void @maxi_u_h(ptr %ptr) {
 ; MSA-LABEL: maxi_u_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1394,13 +1394,13 @@ define void @maxi_u_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %a, i32 2)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_s_h(<8 x i16> * %ptr) {
+define void @mini_s_h(ptr %ptr) {
 ; MSA-LABEL: mini_s_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1416,13 +1416,13 @@ define void @mini_s_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %a, i32 2)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_u_h(<8 x i16> * %ptr) {
+define void @mini_u_h(ptr %ptr) {
 ; MSA-LABEL: mini_u_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1438,13 +1438,13 @@ define void @mini_u_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %a, i32 2)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ldi_h(<8 x i16> * %ptr) {
+define void @ldi_h(ptr %ptr) {
 ; MSA-LABEL: ldi_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ldi.h $w0, 3
@@ -1459,11 +1459,11 @@ define void @ldi_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
   %r = call <8 x i16> @llvm.mips.ldi.h(i32 3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @sldi_h(<8 x i16> * %ptr) {
+define void @sldi_h(ptr %ptr) {
 ; MSA-LABEL: sldi_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1479,13 +1479,13 @@ define void @sldi_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %a, <8 x i16> %a, i32 3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @slli_h(<8 x i16> * %ptr) {
+define void @slli_h(ptr %ptr) {
 ; MSA-LABEL: slli_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1501,13 +1501,13 @@ define void @slli_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.slli.h(<8 x i16> %a, i32 3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @splati_h(<8 x i16> * %ptr) {
+define void @splati_h(ptr %ptr) {
 ; MSA-LABEL: splati_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1523,13 +1523,13 @@ define void @splati_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.splati.h(<8 x i16> %a, i32 3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srai_h(<8 x i16> * %ptr) {
+define void @srai_h(ptr %ptr) {
 ; MSA-LABEL: srai_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1545,13 +1545,13 @@ define void @srai_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srai.h(<8 x i16> %a, i32 3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srari_h(<8 x i16> * %ptr) {
+define void @srari_h(ptr %ptr) {
 ; MSA-LABEL: srari_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1567,13 +1567,13 @@ define void @srari_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srari.h(<8 x i16> %a, i32 3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srli_h(<8 x i16> * %ptr) {
+define void @srli_h(ptr %ptr) {
 ; MSA-LABEL: srli_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1589,13 +1589,13 @@ define void @srli_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srli.h(<8 x i16> %a, i32 3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srlri_h(<8 x i16> * %ptr) {
+define void @srlri_h(ptr %ptr) {
 ; MSA-LABEL: srlri_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1611,13 +1611,13 @@ define void @srlri_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.h $w0, 0($1)
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %a, i32 3)
-  store <8 x i16> %r, <8 x i16> * %ptr, align 16
+  store <8 x i16> %r, ptr %ptr, align 16
   ret void
 }
 
-define i32 @copy_s_b(<16 x i8> * %ptr) {
+define i32 @copy_s_b(ptr %ptr) {
 ; MSA-LABEL: copy_s_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -1631,11 +1631,11 @@ define i32 @copy_s_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    copy_s.b $2, $w0[1]
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.s.b(<16 x i8> %a, i32 1)
   ret i32 %r
 }
-define i32 @copy_s_h(<8 x i16> * %ptr) {
+define i32 @copy_s_h(ptr %ptr) {
 ; MSA-LABEL: copy_s_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1649,11 +1649,11 @@ define i32 @copy_s_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    copy_s.h $2, $w0[1]
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.s.h(<8 x i16> %a, i32 1)
   ret i32 %r
 }
-define i32 @copy_s_w(<4 x i32> * %ptr) {
+define i32 @copy_s_w(ptr %ptr) {
 ; MSA-LABEL: copy_s_w:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.w $w0, 0($4)
@@ -1667,11 +1667,11 @@ define i32 @copy_s_w(<4 x i32> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    copy_s.w $2, $w0[1]
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.s.w(<4 x i32> %a, i32 1)
   ret i32 %r
 }
-define i32 @copy_u_b(<16 x i8> * %ptr) {
+define i32 @copy_u_b(ptr %ptr) {
 ; MSA-LABEL: copy_u_b:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.b $w0, 0($4)
@@ -1685,11 +1685,11 @@ define i32 @copy_u_b(<16 x i8> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    copy_u.b $2, $w0[1]
 entry:
-  %a = load <16 x i8>, <16 x i8> * %ptr, align 16
+  %a = load <16 x i8>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.u.b(<16 x i8> %a, i32 1)
   ret i32 %r
 }
-define i32 @copy_u_h(<8 x i16> * %ptr) {
+define i32 @copy_u_h(ptr %ptr) {
 ; MSA-LABEL: copy_u_h:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.h $w0, 0($4)
@@ -1703,11 +1703,11 @@ define i32 @copy_u_h(<8 x i16> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    copy_u.h $2, $w0[1]
 entry:
-  %a = load <8 x i16>, <8 x i16> * %ptr, align 16
+  %a = load <8 x i16>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.u.h(<8 x i16> %a, i32 1)
   ret i32 %r
 }
-define i32 @copy_u_w(<4 x i32> * %ptr) {
+define i32 @copy_u_w(ptr %ptr) {
 ; MSA32-LABEL: copy_u_w:
 ; MSA32:       # %bb.0: # %entry
 ; MSA32-NEXT:    ld.w $w0, 0($4)
@@ -1727,12 +1727,12 @@ define i32 @copy_u_w(<4 x i32> * %ptr) {
 ; MSA64N64-NEXT:    jr $ra
 ; MSA64N64-NEXT:    copy_u.w $2, $w0[1]
 entry:
-  %a = load <4 x i32>, <4 x i32> * %ptr, align 16
+  %a = load <4 x i32>, ptr %ptr, align 16
   %r = call i32 @llvm.mips.copy.u.w(<4 x i32> %a, i32 1)
   ret i32 %r
 }
 
-define i64 @copy_s_d(<2 x i64> * %ptr) {
+define i64 @copy_s_d(ptr %ptr) {
 ; MSA32-LABEL: copy_s_d:
 ; MSA32:       # %bb.0: # %entry
 ; MSA32-NEXT:    ld.w $w0, 0($4)
@@ -1753,12 +1753,12 @@ define i64 @copy_s_d(<2 x i64> * %ptr) {
 ; MSA64N64-NEXT:    jr $ra
 ; MSA64N64-NEXT:    copy_s.d $2, $w0[1]
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call i64 @llvm.mips.copy.s.d(<2 x i64> %a, i32 1)
   ret i64 %r
 }
 
-define i64 @copy_u_d(<2 x i64> * %ptr) {
+define i64 @copy_u_d(ptr %ptr) {
 ; MSA32-LABEL: copy_u_d:
 ; MSA32:       # %bb.0: # %entry
 ; MSA32-NEXT:    ld.w $w0, 0($4)
@@ -1779,12 +1779,12 @@ define i64 @copy_u_d(<2 x i64> * %ptr) {
 ; MSA64N64-NEXT:    jr $ra
 ; MSA64N64-NEXT:    copy_s.d $2, $w0[1]
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call i64 @llvm.mips.copy.u.d(<2 x i64> %a, i32 1)
   ret i64 %r
 }
 
-define void @addvi_d(<2 x i64> * %ptr) {
+define void @addvi_d(ptr %ptr) {
 ; MSA-LABEL: addvi_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -1800,13 +1800,13 @@ define void @addvi_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %a, i32 25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bclri_d(<2 x i64> * %ptr) {
+define void @bclri_d(ptr %ptr) {
 ; MSA-LABEL: bclri_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -1822,13 +1822,13 @@ define void @bclri_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %a, i32 16)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsli_d(<2 x i64> * %ptr, <2 x i64> * %ptr2) {
+define void @binsli_d(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: binsli_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($5)
@@ -1847,14 +1847,14 @@ define void @binsli_d(<2 x i64> * %ptr, <2 x i64> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w1, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
-  %b = load <2 x i64>, <2 x i64> * %ptr2, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
+  %b = load <2 x i64>, ptr %ptr2, align 16
   %r = call <2 x i64> @llvm.mips.binsli.d(<2 x i64> %a, <2 x i64> %b, i32 4)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @binsri_d(<2 x i64> * %ptr, <2 x i64> * %ptr2) {
+define void @binsri_d(ptr %ptr, ptr %ptr2) {
 ; MSA-LABEL: binsri_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($5)
@@ -1873,14 +1873,14 @@ define void @binsri_d(<2 x i64> * %ptr, <2 x i64> * %ptr2) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w1, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
-  %b = load <2 x i64>, <2 x i64> * %ptr2, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
+  %b = load <2 x i64>, ptr %ptr2, align 16
   %r = call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %a, <2 x i64> %b, i32 5)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bnegi_d(<2 x i64> * %ptr) {
+define void @bnegi_d(ptr %ptr) {
 ; MSA-LABEL: bnegi_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -1896,13 +1896,13 @@ define void @bnegi_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %a, i32 9)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @bseti_d(<2 x i64> * %ptr) {
+define void @bseti_d(ptr %ptr) {
 ; MSA-LABEL: bseti_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -1918,13 +1918,13 @@ define void @bseti_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %a, i32 25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_s_d(<2 x i64> * %ptr) {
+define void @clei_s_d(ptr %ptr) {
 ; MSA-LABEL: clei_s_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -1940,13 +1940,13 @@ define void @clei_s_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %a, i32 15)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clei_u_d(<2 x i64> * %ptr) {
+define void @clei_u_d(ptr %ptr) {
 ; MSA-LABEL: clei_u_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -1962,13 +1962,13 @@ define void @clei_u_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %a, i32 25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_s_d(<2 x i64> * %ptr) {
+define void @clti_s_d(ptr %ptr) {
 ; MSA-LABEL: clti_s_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -1984,13 +1984,13 @@ define void @clti_s_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %a, i32 15)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @clti_u_d(<2 x i64> * %ptr) {
+define void @clti_u_d(ptr %ptr) {
 ; MSA-LABEL: clti_u_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2006,13 +2006,13 @@ define void @clti_u_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %a, i32 25)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ldi_d(<2 x i64> * %ptr) {
+define void @ldi_d(ptr %ptr) {
 ; MSA32-LABEL: ldi_d:
 ; MSA32:       # %bb.0: # %entry
 ; MSA32-NEXT:    ldi.d $w0, 3
@@ -2033,11 +2033,11 @@ define void @ldi_d(<2 x i64> * %ptr) {
 ; MSA64N64-NEXT:    st.d $w0, 0($4)
 entry:
   %r = call <2 x i64> @llvm.mips.ldi.d(i32 3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_s_d(<2 x i64> * %ptr) {
+define void @maxi_s_d(ptr %ptr) {
 ; MSA-LABEL: maxi_s_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2053,13 +2053,13 @@ define void @maxi_s_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %a, i32 2)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @maxi_u_d(<2 x i64> * %ptr) {
+define void @maxi_u_d(ptr %ptr) {
 ; MSA-LABEL: maxi_u_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2075,13 +2075,13 @@ define void @maxi_u_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %a, i32 2)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_s_d(<2 x i64> * %ptr) {
+define void @mini_s_d(ptr %ptr) {
 ; MSA-LABEL: mini_s_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2097,13 +2097,13 @@ define void @mini_s_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %a, i32 2)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @mini_u_d(<2 x i64> * %ptr) {
+define void @mini_u_d(ptr %ptr) {
 ; MSA-LABEL: mini_u_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2119,13 +2119,13 @@ define void @mini_u_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %a, i32 2)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @sldi_d(<2 x i64> * %ptr) {
+define void @sldi_d(ptr %ptr) {
 ; MSA-LABEL: sldi_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2141,13 +2141,13 @@ define void @sldi_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %a, <2 x i64> %a, i32 1)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @slli_d(<2 x i64> * %ptr) {
+define void @slli_d(ptr %ptr) {
 ; MSA-LABEL: slli_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2163,13 +2163,13 @@ define void @slli_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.slli.d(<2 x i64> %a, i32 3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srai_d(<2 x i64> * %ptr) {
+define void @srai_d(ptr %ptr) {
 ; MSA-LABEL: srai_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2185,13 +2185,13 @@ define void @srai_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srai.d(<2 x i64> %a, i32 3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srari_d(<2 x i64> * %ptr) {
+define void @srari_d(ptr %ptr) {
 ; MSA-LABEL: srari_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2207,13 +2207,13 @@ define void @srari_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srari.d(<2 x i64> %a, i32 3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srli_d(<2 x i64> * %ptr) {
+define void @srli_d(ptr %ptr) {
 ; MSA-LABEL: srli_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2229,13 +2229,13 @@ define void @srli_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srli.d(<2 x i64> %a, i32 3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @srlri_d(<2 x i64> * %ptr) {
+define void @srlri_d(ptr %ptr) {
 ; MSA-LABEL: srlri_d:
 ; MSA:       # %bb.0: # %entry
 ; MSA-NEXT:    ld.d $w0, 0($4)
@@ -2251,13 +2251,13 @@ define void @srlri_d(<2 x i64> * %ptr) {
 ; MSA64N32-NEXT:    jr $ra
 ; MSA64N32-NEXT:    st.d $w0, 0($1)
 entry:
-  %a = load <2 x i64>, <2 x i64> * %ptr, align 16
+  %a = load <2 x i64>, ptr %ptr, align 16
   %r = call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %a, i32 3)
-  store <2 x i64> %r, <2 x i64> * %ptr, align 16
+  store <2 x i64> %r, ptr %ptr, align 16
   ret void
 }
 
-define void @ld_d2(<2 x i64> * %ptr, i8 * %ldptr) {
+define void @ld_d2(ptr %ptr, ptr %ldptr) {
 ; MSA32-LABEL: ld_d2:
 ; MSA32:       # %bb.0: # %entry
 ; MSA32-NEXT:    addiu $1, $5, 4096
@@ -2281,8 +2281,8 @@ define void @ld_d2(<2 x i64> * %ptr, i8 * %ldptr) {
 ; MSA64N64-NEXT:    jr $ra
 ; MSA64N64-NEXT:    st.d $w0, 0($4)
 entry:
-  %a = call <2 x i64> @llvm.mips.ld.d(i8* %ldptr, i32 4096)
-  store <2 x i64> %a, <2 x i64> * %ptr, align 16
+  %a = call <2 x i64> @llvm.mips.ld.d(ptr %ldptr, i32 4096)
+  store <2 x i64> %a, ptr %ptr, align 16
   ret void
 }
 
@@ -2388,11 +2388,11 @@ declare i64 @llvm.mips.copy.u.d(<2 x i64>, i32)
 declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32)
 declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32)
 declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32)
-declare <16 x i8> @llvm.mips.ld.b(i8*, i32)
-declare <8 x i16> @llvm.mips.ld.h(i8*, i32)
-declare <4 x i32> @llvm.mips.ld.w(i8*, i32)
-declare <2 x i64> @llvm.mips.ld.d(i8*, i32)
-declare void @llvm.mips.st.b(<16 x i8>, i8*, i32)
-declare void @llvm.mips.st.h(<8 x i16>, i8*, i32)
-declare void @llvm.mips.st.w(<4 x i32>, i8*, i32)
-declare void @llvm.mips.st.d(<2 x i64>, i8*, i32)
+declare <16 x i8> @llvm.mips.ld.b(ptr, i32)
+declare <8 x i16> @llvm.mips.ld.h(ptr, i32)
+declare <4 x i32> @llvm.mips.ld.w(ptr, i32)
+declare <2 x i64> @llvm.mips.ld.d(ptr, i32)
+declare void @llvm.mips.st.b(<16 x i8>, ptr, i32)
+declare void @llvm.mips.st.h(<8 x i16>, ptr, i32)
+declare void @llvm.mips.st.w(<4 x i32>, ptr, i32)
+declare void @llvm.mips.st.d(<2 x i64>, ptr, i32)

diff  --git a/llvm/test/CodeGen/Mips/msa/inline-asm.ll b/llvm/test/CodeGen/Mips/msa/inline-asm.ll
index 55cf14ee2d99a..57cd78a25647c 100644
--- a/llvm/test/CodeGen/Mips/msa/inline-asm.ll
+++ b/llvm/test/CodeGen/Mips/msa/inline-asm.ll
@@ -9,26 +9,26 @@ entry:
   ; CHECK-LABEL: test1:
   %0 = call <4 x i32> asm "ldi.w ${0:w}, 1", "=f"()
   ; CHECK: ldi.w $w{{[1-3]?[0-9]}}, 1
-  store <4 x i32> %0, <4 x i32>* @v4i32_r
+  store <4 x i32> %0, ptr @v4i32_r
   ret void
 }
 
 define void @test2() nounwind {
 entry:
   ; CHECK-LABEL: test2:
-  %0 = load <4 x i32>, <4 x i32>* @v4i32_r
+  %0 = load <4 x i32>, ptr @v4i32_r
   %1 = call <4 x i32> asm "addvi.w ${0:w}, ${1:w}, 1", "=f,f"(<4 x i32> %0)
   ; CHECK: addvi.w $w{{[1-3]?[0-9]}}, $w{{[1-3]?[0-9]}}, 1
-  store <4 x i32> %1, <4 x i32>* @v4i32_r
+  store <4 x i32> %1, ptr @v4i32_r
   ret void
 }
 
 define void @test3() nounwind {
 entry:
   ; CHECK-LABEL: test3:
-  %0 = load <4 x i32>, <4 x i32>* @v4i32_r
+  %0 = load <4 x i32>, ptr @v4i32_r
   %1 = call <4 x i32> asm sideeffect "addvi.w ${0:w}, ${1:w}, 1", "=f,f,~{$w0}"(<4 x i32> %0)
   ; CHECK: addvi.w $w{{([1-9]|[1-3][0-9])}}, $w{{([1-9]|[1-3][0-9])}}, 1
-  store <4 x i32> %1, <4 x i32>* @v4i32_r
+  store <4 x i32> %1, ptr @v4i32_r
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/ldr_str.ll b/llvm/test/CodeGen/Mips/msa/ldr_str.ll
index 51c8bcd3fdbc5..74f84aa9bbd7a 100644
--- a/llvm/test/CodeGen/Mips/msa/ldr_str.ll
+++ b/llvm/test/CodeGen/Mips/msa/ldr_str.ll
@@ -8,7 +8,7 @@
 
 ; Test intrinsics for 4-byte and 8-byte MSA load and stores.
 
-define void @llvm_mips_ldr_d_test(<2 x i64>* %val, i8* %ptr) nounwind {
+define void @llvm_mips_ldr_d_test(ptr %val, ptr %ptr) nounwind {
 ; MIPS32R5-EB-LABEL: llvm_mips_ldr_d_test:
 ; MIPS32R5-EB:       # %bb.0: # %entry
 ; MIPS32R5-EB-NEXT:    # implicit-def: $v0
@@ -62,14 +62,14 @@ define void @llvm_mips_ldr_d_test(<2 x i64>* %val, i8* %ptr) nounwind {
 ; MIPS64R6-NEXT:    st.d $w0, 0($4)
 ; MIPS64R6-NEXT:    jrc $ra
 entry:
-  %0 = tail call <2 x i64> @llvm.mips.ldr.d(i8* %ptr, i32 16)
-  store <2 x i64> %0, <2 x i64>* %val
+  %0 = tail call <2 x i64> @llvm.mips.ldr.d(ptr %ptr, i32 16)
+  store <2 x i64> %0, ptr %val
   ret void
 }
 
-declare <2 x i64> @llvm.mips.ldr.d(i8*, i32) nounwind
+declare <2 x i64> @llvm.mips.ldr.d(ptr, i32) nounwind
 
-define void @llvm_mips_ldr_w_test(<4 x i32>* %val, i8* %ptr) nounwind {
+define void @llvm_mips_ldr_w_test(ptr %val, ptr %ptr) nounwind {
 ; MIPS32R5-EB-LABEL: llvm_mips_ldr_w_test:
 ; MIPS32R5-EB:       # %bb.0: # %entry
 ; MIPS32R5-EB-NEXT:    # implicit-def: $at
@@ -111,14 +111,14 @@ define void @llvm_mips_ldr_w_test(<4 x i32>* %val, i8* %ptr) nounwind {
 ; MIPS64R6-NEXT:    st.w $w0, 0($4)
 ; MIPS64R6-NEXT:    jrc $ra
 entry:
-  %0 = tail call <4 x i32> @llvm.mips.ldr.w(i8* %ptr, i32 16)
-  store <4 x i32> %0, <4 x i32>* %val
+  %0 = tail call <4 x i32> @llvm.mips.ldr.w(ptr %ptr, i32 16)
+  store <4 x i32> %0, ptr %val
   ret void
 }
 
-declare <4 x i32> @llvm.mips.ldr.w(i8*, i32) nounwind
+declare <4 x i32> @llvm.mips.ldr.w(ptr, i32) nounwind
 
-define void @llvm_mips_str_d_test(<2 x i64>* %val, i8* %ptr) nounwind {
+define void @llvm_mips_str_d_test(ptr %val, ptr %ptr) nounwind {
 ; MIPS32R5-EB-LABEL: llvm_mips_str_d_test:
 ; MIPS32R5-EB:       # %bb.0: # %entry
 ; MIPS32R5-EB-NEXT:    ld.d $w0, 0($4)
@@ -168,14 +168,14 @@ define void @llvm_mips_str_d_test(<2 x i64>* %val, i8* %ptr) nounwind {
 ; MIPS64R6-NEXT:    sd $1, 16($5)
 ; MIPS64R6-NEXT:    jrc $ra
 entry:
-  %0 = load <2 x i64>, <2 x i64>* %val
-  tail call void @llvm.mips.str.d(<2 x i64> %0, i8* %ptr, i32 16)
+  %0 = load <2 x i64>, ptr %val
+  tail call void @llvm.mips.str.d(<2 x i64> %0, ptr %ptr, i32 16)
   ret void
 }
 
-declare void @llvm.mips.str.d(<2 x i64>, i8*, i32) nounwind
+declare void @llvm.mips.str.d(<2 x i64>, ptr, i32) nounwind
 
-define void @llvm_mips_str_w_test(<4 x i32>* %val, i8* %ptr) nounwind {
+define void @llvm_mips_str_w_test(ptr %val, ptr %ptr) nounwind {
 ; MIPS32R5-EB-LABEL: llvm_mips_str_w_test:
 ; MIPS32R5-EB:       # %bb.0: # %entry
 ; MIPS32R5-EB-NEXT:    ld.w $w0, 0($4)
@@ -215,10 +215,10 @@ define void @llvm_mips_str_w_test(<4 x i32>* %val, i8* %ptr) nounwind {
 ; MIPS64R6-NEXT:    sw $1, 16($5)
 ; MIPS64R6-NEXT:    jrc $ra
 entry:
-  %0 = load <4 x i32>, <4 x i32>* %val
-  tail call void @llvm.mips.str.w(<4 x i32> %0, i8* %ptr, i32 16)
+  %0 = load <4 x i32>, ptr %val
+  tail call void @llvm.mips.str.w(<4 x i32> %0, ptr %ptr, i32 16)
   ret void
 }
 
-declare void @llvm.mips.str.w(<4 x i32>, i8*, i32) nounwind
+declare void @llvm.mips.str.w(<4 x i32>, ptr, i32) nounwind
 

diff  --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
index 2ff6c5e369eee..db2b52028a892 100644
--- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
+++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
@@ -7,25 +7,25 @@
 ; "Unexpected illegal type!" assertion.
 ; It should at least successfully build.
 
-define void @autogen_SD1704963983(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD1704963983(ptr, ptr, ptr, i32, i64, i8) {
 BB:
   %A4 = alloca <4 x double>
   %A3 = alloca <8 x i64>
   %A2 = alloca <1 x double>
   %A1 = alloca double
   %A = alloca i32
-  %L = load i8, i8* %0
-  store i8 77, i8* %0
+  %L = load i8, ptr %0
+  store i8 77, ptr %0
   %E = extractelement <8 x i64> zeroinitializer, i32 2
   %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15, i32 1, i32 3>
   %I = insertelement <8 x i64> zeroinitializer, i64 %E, i32 7
-  %Sl = select i1 false, i8* %0, i8* %0
+  %Sl = select i1 false, ptr %0, ptr %0
   %Cmp = icmp eq i32 434069, 272505
   br label %CF
 
 CF:                                               ; preds = %CF, %CF78, %BB
-  %L5 = load i8, i8* %Sl
-  store i8 %L, i8* %Sl
+  %L5 = load i8, ptr %Sl
+  store i8 %L, ptr %Sl
   %E6 = extractelement <8 x i32> zeroinitializer, i32 2
   %Shuff7 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 undef>
   %I8 = insertelement <8 x i64> zeroinitializer, i64 %4, i32 7
@@ -33,8 +33,8 @@ CF:                                               ; preds = %CF, %CF78, %BB
   %FC = sitofp <8 x i64> zeroinitializer to <8 x float>
   %Sl9 = select i1 %Cmp, i8 77, i8 77
   %Cmp10 = icmp uge <8 x i64> %Shuff, zeroinitializer
-  %L11 = load i8, i8* %0
-  store i8 %Sl9, i8* %0
+  %L11 = load i8, ptr %0
+  store i8 %Sl9, ptr %0
   %E12 = extractelement <1 x i16> zeroinitializer, i32 0
   %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 9, i32 11, i32 13, i32 15, i32 undef, i32 3, i32 5, i32 7>
   %I14 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 3
@@ -42,8 +42,8 @@ CF:                                               ; preds = %CF, %CF78, %BB
   %Tr = trunc <8 x i64> %Shuff to <8 x i32>
   %Sl16 = select i1 %Cmp, i8 77, i8 %5
   %Cmp17 = icmp ult <8 x i1> %Cmp10, %Cmp10
-  %L18 = load i8, i8* %Sl
-  store i8 -1, i8* %Sl
+  %L18 = load i8, ptr %Sl
+  store i8 -1, ptr %Sl
   %E19 = extractelement <8 x i32> zeroinitializer, i32 3
   %Shuff20 = shufflevector <8 x float> %FC, <8 x float> %FC, <8 x i32> <i32 6, i32 8, i32 undef, i32 12, i32 14, i32 0, i32 2, i32 undef>
   %I21 = insertelement <8 x i64> %Shuff13, i64 %E, i32 0
@@ -54,8 +54,8 @@ CF:                                               ; preds = %CF, %CF78, %BB
   br i1 %Cmp25, label %CF, label %CF78
 
 CF78:                                             ; preds = %CF
-  %L26 = load i8, i8* %Sl
-  store i32 50347, i32* %A
+  %L26 = load i8, ptr %Sl
+  store i32 50347, ptr %A
   %E27 = extractelement <8 x i1> %Cmp10, i32 2
   br i1 %E27, label %CF, label %CF77
 
@@ -65,48 +65,46 @@ CF77:                                             ; preds = %CF77, %CF81, %CF78
   %B30 = urem <8 x i32> %Tr, zeroinitializer
   %Tr31 = trunc i32 0 to i16
   %Sl32 = select i1 %Cmp, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
-  %L33 = load i8, i8* %Sl
-  store i8 %L26, i8* %Sl
+  %L33 = load i8, ptr %Sl
+  store i8 %L26, ptr %Sl
   %E34 = extractelement <4 x i32> zeroinitializer, i32 0
   %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> %B, <1 x i32> undef
   %I36 = insertelement <8 x i64> %Shuff28, i64 %E, i32 7
   %B37 = srem <1 x i16> %I29, zeroinitializer
   %FC38 = sitofp <8 x i32> %B30 to <8 x double>
   %Sl39 = select i1 %Cmp, double 0.000000e+00, double %Sl24
-  %L40 = load i8, i8* %Sl
-  store i8 %Sl16, i8* %Sl
+  %L40 = load i8, ptr %Sl
+  store i8 %Sl16, ptr %Sl
   %E41 = extractelement <1 x i16> zeroinitializer, i32 0
   %Shuff42 = shufflevector <8 x i1> %Cmp17, <8 x i1> %Cmp10, <8 x i32> <i32 14, i32 undef, i32 2, i32 4, i32 undef, i32 8, i32 10, i32 12>
   %I43 = insertelement <4 x i32> zeroinitializer, i32 272505, i32 0
   %B44 = urem <8 x i32> %B30, %Tr
-  %PC = bitcast i8* %0 to i64*
   %Sl45 = select i1 %Cmp, <8 x i1> %Cmp10, <8 x i1> %Shuff42
   %Cmp46 = fcmp ugt float 0xB856238A00000000, 0x47DA795E40000000
   br i1 %Cmp46, label %CF77, label %CF80
 
 CF80:                                             ; preds = %CF80, %CF77
-  %L47 = load i64, i64* %PC
-  store i8 77, i8* %Sl
+  %L47 = load i64, ptr %0
+  store i8 77, ptr %Sl
   %E48 = extractelement <8 x i64> zeroinitializer, i32 2
   %Shuff49 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff7, <8 x i32> <i32 5, i32 7, i32 9, i32 undef, i32 undef, i32 undef, i32 undef, i32 3>
   %I50 = insertelement <8 x i64> zeroinitializer, i64 %L47, i32 7
   %B51 = fdiv float 0x46CC2D8000000000, %FC23
-  %PC52 = bitcast <8 x i64>* %A3 to i64*
   %Sl53 = select i1 %Cmp, <8 x i64> %Shuff, <8 x i64> %Shuff
   %Cmp54 = fcmp ole float 0x47DA795E40000000, 0xB856238A00000000
   br i1 %Cmp54, label %CF80, label %CF81
 
 CF81:                                             ; preds = %CF80
-  %L55 = load i8, i8* %Sl
-  store i8 %Sl16, i8* %Sl
+  %L55 = load i8, ptr %Sl
+  store i8 %Sl16, ptr %Sl
   %E56 = extractelement <1 x i16> %B, i32 0
   %Shuff57 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> <i32 1>
   %I58 = insertelement <8 x i64> zeroinitializer, i64 %L47, i32 7
   %B59 = srem i32 %E19, %E19
   %Sl60 = select i1 %Cmp, i8 77, i8 77
   %Cmp61 = icmp ult <1 x i16> zeroinitializer, %B
-  %L62 = load i8, i8* %Sl
-  store i64 %L47, i64* %PC52
+  %L62 = load i8, ptr %Sl
+  store i64 %L47, ptr %A3
   %E63 = extractelement <4 x i32> %I43, i32 2
   %Shuff64 = shufflevector <4 x i1> zeroinitializer, <4 x i1> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3>
   %I65 = insertelement <8 x i64> %B22, i64 %L47, i32 7
@@ -117,18 +115,18 @@ CF81:                                             ; preds = %CF80
   br i1 %Cmp69, label %CF77, label %CF79
 
 CF79:                                             ; preds = %CF81
-  %L70 = load i32, i32* %A
-  store i64 %4, i64* %PC
+  %L70 = load i32, ptr %A
+  store i64 %4, ptr %0
   %E71 = extractelement <4 x i32> zeroinitializer, i32 0
   %Shuff72 = shufflevector <8 x i32> zeroinitializer, <8 x i32> %B44, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 undef, i32 7, i32 9>
   %I73 = insertelement <8 x i16> zeroinitializer, i16 %E12, i32 5
   %B74 = fsub double 0.000000e+00, 0.000000e+00
   %Sl75 = select i1 %Cmp46, i32 %E6, i32 %E19
   %Cmp76 = icmp ugt <4 x i32> %I43, zeroinitializer
-  store i8 %L, i8* %Sl
-  store i64 %L47, i64* %PC
-  store i64 %L47, i64* %PC
-  store i8 %L5, i8* %Sl
-  store i8 %L5, i8* %0
+  store i8 %L, ptr %Sl
+  store i64 %L47, ptr %0
+  store i64 %L47, ptr %0
+  store i8 %L5, ptr %Sl
+  store i8 %L5, ptr %0
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
index bec350ce7001f..a220b9887f901 100644
--- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
+++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
@@ -7,23 +7,23 @@
 ; `Opc && "Cannot copy registers"' assertion.
 ; It should at least successfully build.
 
-define void @autogen_SD1935737938(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD1935737938(ptr, ptr, ptr, i32, i64, i8) {
 BB:
   %A4 = alloca i64
   %A3 = alloca <4 x i32>
   %A2 = alloca i64
   %A1 = alloca i32
   %A = alloca <2 x i64>
-  %L = load i8, i8* %0
-  store i8 -1, i8* %0
+  %L = load i8, ptr %0
+  store i8 -1, ptr %0
   %E = extractelement <2 x i32> zeroinitializer, i32 0
   %Shuff = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
   %I = insertelement <1 x i64> <i64 -1>, i64 286689, i32 0
   %B = lshr i8 %L, -69
   %ZE = fpext float 0xBF2AA5FE80000000 to double
   %Sl = select i1 true, <1 x i64> <i64 -1>, <1 x i64> <i64 -1>
-  %L5 = load i8, i8* %0
-  store i8 -69, i8* %0
+  %L5 = load i8, ptr %0
+  store i8 -69, ptr %0
   %E6 = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
   %Shuff7 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
   %I8 = insertelement <2 x i32> zeroinitializer, i32 135673, i32 1
@@ -31,8 +31,8 @@ BB:
   %FC = uitofp i32 %3 to double
   %Sl10 = select i1 true, <1 x i1> zeroinitializer, <1 x i1> zeroinitializer
   %Cmp = icmp ne <1 x i64> %I, <i64 -1>
-  %L11 = load i8, i8* %0
-  store i8 %L11, i8* %0
+  %L11 = load i8, ptr %0
+  store i8 %L11, ptr %0
   %E12 = extractelement <1 x i64> <i64 -1>, i32 0
   %Shuff13 = shufflevector <1 x i64> %Sl, <1 x i64> <i64 -1>, <1 x i32> <i32 1>
   %I14 = insertelement <1 x i64> %I, i64 303290, i32 0
@@ -42,16 +42,16 @@ BB:
   br label %CF74
 
 CF74:                                             ; preds = %CF74, %CF80, %CF76, %BB
-  %L18 = load i8, i8* %0
-  store i8 -69, i8* %0
+  %L18 = load i8, ptr %0
+  store i8 -69, ptr %0
   %E19 = extractelement <1 x i64> %Sl, i32 0
   %Shuff20 = shufflevector <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i32> <i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10>
   %I21 = insertelement <2 x i32> %Shuff, i32 135673, i32 0
   %B22 = urem i32 135673, %3
   %FC23 = sitofp i8 %L to float
   %Sl24 = select i1 true, i8 %B, i8 %L18
-  %L25 = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L25 = load i8, ptr %0
+  store i8 %L, ptr %0
   %E26 = extractelement <2 x i32> %Shuff, i32 1
   %Shuff27 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 2, i32 0>
   %I28 = insertelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i64 %E12, i32 8
@@ -62,16 +62,16 @@ CF74:                                             ; preds = %CF74, %CF80, %CF76,
   br i1 %Cmp31, label %CF74, label %CF80
 
 CF80:                                             ; preds = %CF74
-  %L32 = load i8, i8* %0
-  store i8 -1, i8* %0
+  %L32 = load i8, ptr %0
+  store i8 -1, ptr %0
   %E33 = extractelement <2 x i32> zeroinitializer, i32 1
   %Shuff34 = shufflevector <1 x i64> %Shuff13, <1 x i64> <i64 -1>, <1 x i32> zeroinitializer
   %I35 = insertelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i8 -1, i32 0
   %FC36 = sitofp <1 x i1> %Cmp to <1 x float>
   %Sl37 = select i1 true, <8 x i8> %Shuff20, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %Cmp38 = icmp sgt <2 x i32> %I21, %Shuff27
-  %L39 = load i8, i8* %0
-  store i8 %Sl24, i8* %0
+  %L39 = load i8, ptr %0
+  store i8 %Sl24, ptr %0
   %E40 = extractelement <8 x i64> zeroinitializer, i32 1
   %Shuff41 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Cmp38, <2 x i32> <i32 0, i32 2>
   %I42 = insertelement <4 x i32> zeroinitializer, i32 414573, i32 2
@@ -81,8 +81,8 @@ CF80:                                             ; preds = %CF74
   br i1 %Cmp45, label %CF74, label %CF76
 
 CF76:                                             ; preds = %CF80
-  %L46 = load i8, i8* %0
-  store i8 %L39, i8* %0
+  %L46 = load i8, ptr %0
+  store i8 %L39, ptr %0
   %E47 = extractelement <2 x i32> %Shuff27, i32 0
   %Shuff48 = shufflevector <1 x i1> %Sl10, <1 x i1> %Sl10, <1 x i32> <i32 1>
   %I49 = insertelement <1 x i64> <i64 -1>, i64 %E12, i32 0
@@ -92,8 +92,8 @@ CF76:                                             ; preds = %CF80
   br i1 %Cmp52, label %CF74, label %CF75
 
 CF75:                                             ; preds = %CF75, %CF76
-  %L53 = load i8, i8* %0
-  store i8 %L18, i8* %0
+  %L53 = load i8, ptr %0
+  store i8 %L18, ptr %0
   %E54 = extractelement <8 x i8> %Shuff20, i32 5
   %Shuff55 = shufflevector <2 x i32> %Shuff, <2 x i32> zeroinitializer, <2 x i32> <i32 0, i32 2>
   %I56 = insertelement <4 x i32> %I42, i32 %B22, i32 2
@@ -103,8 +103,8 @@ CF75:                                             ; preds = %CF75, %CF76
   br i1 %Cmp59, label %CF75, label %CF78
 
 CF78:                                             ; preds = %CF75
-  %L60 = load i8, i8* %0
-  store i8 -69, i8* %0
+  %L60 = load i8, ptr %0
+  store i8 -69, ptr %0
   %E61 = extractelement <2 x i32> zeroinitializer, i32 0
   %Shuff62 = shufflevector <2 x i32> %Shuff7, <2 x i32> %I21, <2 x i32> <i32 1, i32 3>
   %I63 = insertelement <1 x i1> %Sl16, i1 %Cmp45, i32 0
@@ -115,8 +115,8 @@ CF78:                                             ; preds = %CF75
   br label %CF
 
 CF:                                               ; preds = %CF, %CF78
-  %L68 = load i8, i8* %0
-  store i64 %B57, i64* %2
+  %L68 = load i8, ptr %0
+  store i64 %B57, ptr %2
   %E69 = extractelement <2 x i1> %Shuff41, i32 1
   br i1 %E69, label %CF, label %CF77
 
@@ -129,10 +129,10 @@ CF77:                                             ; preds = %CF77, %CF
   br i1 %Cmp73, label %CF77, label %CF79
 
 CF79:                                             ; preds = %CF77
-  store i8 %L18, i8* %0
-  store i8 %E54, i8* %0
-  store i8 %L39, i8* %0
-  store i8 %L39, i8* %0
-  store i8 %B, i8* %0
+  store i8 %L18, ptr %0
+  store i8 %E54, ptr %0
+  store i8 %L39, ptr %0
+  store i8 %L39, ptr %0
+  store i8 %B, ptr %0
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
index d8e5cdb963566..d1d2f0db69dcf 100644
--- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
+++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
@@ -6,15 +6,15 @@
 ; This test originally failed for MSA after dereferencing a null this pointer.
 ; It should at least successfully build.
 
-define void @autogen_SD2704903805(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD2704903805(ptr, ptr, ptr, i32, i64, i8) {
 BB:
   %A4 = alloca i32
   %A3 = alloca i32
   %A2 = alloca i8
   %A1 = alloca i32
   %A = alloca i8
-  %L = load i8, i8* %0
-  store i8 %5, i8* %0
+  %L = load i8, ptr %0
+  store i8 %5, ptr %0
   %E = extractelement <2 x i16> zeroinitializer, i32 0
   %Shuff = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> undef
   %I = insertelement <1 x i8> <i8 -1>, i8 85, i32 0
@@ -25,8 +25,8 @@ BB:
   br label %CF83
 
 CF83:                                             ; preds = %BB
-  %L5 = load i8, i8* %0
-  store i8 85, i8* %0
+  %L5 = load i8, ptr %0
+  store i8 85, ptr %0
   %E6 = extractelement <1 x i8> <i8 -1>, i32 0
   %Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 1, i32 3>
   %I8 = insertelement <4 x i16> zeroinitializer, i16 %E, i32 3
@@ -37,8 +37,8 @@ CF83:                                             ; preds = %BB
   br label %CF
 
 CF:                                               ; preds = %CF, %CF81, %CF83
-  %L13 = load i8, i8* %0
-  store i8 0, i8* %0
+  %L13 = load i8, ptr %0
+  store i8 0, ptr %0
   %E14 = extractelement <2 x i64> zeroinitializer, i32 0
   %Shuff15 = shufflevector <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 3, i32 5, i32 7, i32 undef>
   %I16 = insertelement <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i64 81222, i32 1
@@ -52,24 +52,23 @@ CF80:                                             ; preds = %CF80, %CF
   br i1 %Cmp19, label %CF80, label %CF81
 
 CF81:                                             ; preds = %CF80
-  %L20 = load i8, i8* %0
-  store i8 85, i8* %0
+  %L20 = load i8, ptr %0
+  store i8 85, ptr %0
   %E21 = extractelement <1 x i8> <i8 -1>, i32 0
   %Shuff22 = shufflevector <1 x i8> <i8 -1>, <1 x i8> %Shuff, <1 x i32> zeroinitializer
   %I23 = insertelement <1 x i8> <i8 -1>, i8 %L5, i32 0
   %FC24 = fptoui <4 x float> %FC to <4 x i16>
   %Sl25 = select i1 %Cmp, <2 x i32> zeroinitializer, <2 x i32> <i32 -1, i32 -1>
   %Cmp26 = icmp ult <4 x i64> %I16, %Shuff15
-  %L27 = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L27 = load i8, ptr %0
+  store i8 %L, ptr %0
   %E28 = extractelement <1 x i8> <i8 -1>, i32 0
   %Shuff29 = shufflevector <8 x i16> zeroinitializer, <8 x i16> zeroinitializer, <8 x i32> <i32 11, i32 undef, i32 15, i32 1, i32 3, i32 5, i32 undef, i32 9>
   %I30 = insertelement <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, i64 %E14, i32 1
   %B31 = mul i8 %E28, 85
-  %PC = bitcast i32* %A3 to i32*
   %Sl32 = select i1 %Cmp12, float %FC10, float 0x4712BFE680000000
-  %L33 = load i32, i32* %PC
-  store i32 %L33, i32* %PC
+  %L33 = load i32, ptr %A3
+  store i32 %L33, ptr %A3
   %E34 = extractelement <2 x i16> zeroinitializer, i32 1
   %Shuff35 = shufflevector <1 x i8> %Shuff, <1 x i8> <i8 -1>, <1 x i32> zeroinitializer
   %I36 = insertelement <1 x i8> <i8 -1>, i8 %L13, i32 0
@@ -79,8 +78,8 @@ CF81:                                             ; preds = %CF80
   br i1 %Cmp39, label %CF, label %CF77
 
 CF77:                                             ; preds = %CF77, %CF81
-  %L40 = load i32, i32* %PC
-  store i32 %3, i32* %PC
+  %L40 = load i32, ptr %A3
+  store i32 %3, ptr %A3
   %E41 = extractelement <2 x i32> zeroinitializer, i32 0
   %Shuff42 = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 3>
   %I43 = insertelement <1 x i8> <i8 -1>, i8 0, i32 0
@@ -88,8 +87,8 @@ CF77:                                             ; preds = %CF77, %CF81
   %Se = sext i32 %3 to i64
   %Sl45 = select i1 true, <1 x i8> %Shuff, <1 x i8> %I43
   %Cmp46 = icmp sge <1 x i8> %I36, %Shuff
-  %L47 = load i32, i32* %PC
-  store i32 %L33, i32* %PC
+  %L47 = load i32, ptr %A3
+  store i32 %L33, ptr %A3
   %E48 = extractelement <2 x i16> zeroinitializer, i32 0
   %Shuff49 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
   %I50 = insertelement <2 x i32> %Sl25, i32 47963, i32 1
@@ -100,8 +99,8 @@ CF77:                                             ; preds = %CF77, %CF81
   br i1 %Cmp54, label %CF77, label %CF78
 
 CF78:                                             ; preds = %CF78, %CF77
-  %L55 = load i32, i32* %PC
-  store i32 %L33, i32* %PC
+  %L55 = load i32, ptr %A3
+  store i32 %L33, ptr %A3
   %E56 = extractelement <8 x i16> %Shuff29, i32 4
   %Shuff57 = shufflevector <1 x i8> <i8 -1>, <1 x i8> <i8 -1>, <1 x i32> <i32 1>
   %I58 = insertelement <1 x i8> %B51, i8 %Sl53, i32 0
@@ -111,8 +110,8 @@ CF78:                                             ; preds = %CF78, %CF77
   br i1 %Cmp60, label %CF78, label %CF79
 
 CF79:                                             ; preds = %CF79, %CF78
-  %L61 = load i32, i32* %PC
-  store i32 %L33, i32* %A3
+  %L61 = load i32, ptr %A3
+  store i32 %L33, ptr %A3
   %E62 = extractelement <4 x i64> %Shuff15, i32 1
   %Shuff63 = shufflevector <8 x i16> %Shuff29, <8 x i16> %Shuff29, <8 x i32> <i32 undef, i32 10, i32 12, i32 undef, i32 undef, i32 undef, i32 4, i32 6>
   %I64 = insertelement <2 x i64> zeroinitializer, i64 %Se, i32 0
@@ -123,8 +122,8 @@ CF79:                                             ; preds = %CF79, %CF78
   br i1 %Cmp68, label %CF79, label %CF82
 
 CF82:                                             ; preds = %CF79
-  %L69 = load i32, i32* %PC
-  store i32 %L33, i32* %PC
+  %L69 = load i32, ptr %A3
+  store i32 %L33, ptr %A3
   %E70 = extractelement <8 x i16> zeroinitializer, i32 3
   %Shuff71 = shufflevector <4 x i64> %Shuff15, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i32> <i32 6, i32 undef, i32 2, i32 4>
   %I72 = insertelement <1 x i8> <i8 -1>, i8 %L, i32 0
@@ -132,10 +131,10 @@ CF82:                                             ; preds = %CF79
   %ZE74 = zext <4 x i1> %Cmp26 to <4 x i32>
   %Sl75 = select i1 %Cmp, i32 463279, i32 %L61
   %Cmp76 = icmp sgt <1 x i8> %Shuff49, %Shuff22
-  store i8 %B31, i8* %0
-  store i8 85, i8* %0
-  store i32 %L33, i32* %PC
-  store i8 %B65, i8* %0
-  store i8 %L5, i8* %0
+  store i8 %B31, ptr %0
+  store i8 85, ptr %0
+  store i32 %L33, ptr %A3
+  store i8 %B65, ptr %0
+  store i8 %L5, ptr %0
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
index ec91e6ccbaa4a..c3158801bee57 100644
--- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
+++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
@@ -7,23 +7,23 @@
 ; "Don't know how to expand this condition!" unreachable.
 ; It should at least successfully build.
 
-define void @autogen_SD3861334421(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD3861334421(ptr, ptr, ptr, i32, i64, i8) {
 BB:
   %A4 = alloca <2 x i32>
   %A3 = alloca <2 x double>
   %A2 = alloca i64
   %A1 = alloca i64
   %A = alloca double
-  %L = load i8, i8* %0
-  store i8 -101, i8* %0
+  %L = load i8, ptr %0
+  store i8 -101, ptr %0
   %E = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
   %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1>
   %I = insertelement <8 x i64> zeroinitializer, i64 %4, i32 5
   %B = and i64 116376, 57247
   %FC = uitofp i8 7 to double
   %Sl = select i1 false, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
-  %L5 = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L5 = load i8, ptr %0
+  store i8 %L, ptr %0
   %E6 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 3
   %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
   %I8 = insertelement <8 x i8> %Sl, i8 7, i32 4
@@ -33,8 +33,8 @@ BB:
   br label %CF
 
 CF:                                               ; preds = %CF, %BB
-  %L11 = load i8, i8* %0
-  store i8 -87, i8* %0
+  %L11 = load i8, ptr %0
+  store i8 -87, ptr %0
   %E12 = extractelement <4 x i64> zeroinitializer, i32 0
   %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> <i32 7, i32 9, i32 11, i32 13, i32 undef, i32 1, i32 3, i32 5>
   %I14 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 1
@@ -45,8 +45,8 @@ CF:                                               ; preds = %CF, %BB
   br i1 %Cmp18, label %CF, label %CF80
 
 CF80:                                             ; preds = %CF80, %CF88, %CF
-  %L19 = load i8, i8* %0
-  store i8 -101, i8* %0
+  %L19 = load i8, ptr %0
+  store i8 -101, ptr %0
   %E20 = extractelement <4 x i64> zeroinitializer, i32 0
   %Shuff21 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff7, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
   %I22 = insertelement <4 x i64> zeroinitializer, i64 127438, i32 1
@@ -56,8 +56,8 @@ CF80:                                             ; preds = %CF80, %CF88, %CF
   br i1 %Cmp25, label %CF80, label %CF83
 
 CF83:                                             ; preds = %CF83, %CF80
-  %L26 = load i8, i8* %0
-  store i8 -87, i8* %0
+  %L26 = load i8, ptr %0
+  store i8 -87, ptr %0
   %E27 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 0
   %Shuff28 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 7, i32 1, i32 3, i32 5>
   %I29 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 492085, i32 1
@@ -68,8 +68,8 @@ CF83:                                             ; preds = %CF83, %CF80
   br i1 %Cmp33, label %CF83, label %CF88
 
 CF88:                                             ; preds = %CF83
-  %L34 = load i8, i8* %0
-  store i8 -87, i8* %0
+  %L34 = load i8, ptr %0
+  store i8 -87, ptr %0
   %E35 = extractelement <8 x i64> %Shuff, i32 7
   %Shuff36 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %Shuff28, <4 x i32> <i32 2, i32 undef, i32 undef, i32 0>
   %I37 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 0
@@ -80,8 +80,8 @@ CF88:                                             ; preds = %CF83
   br i1 %Cmp40, label %CF80, label %CF81
 
 CF81:                                             ; preds = %CF81, %CF85, %CF87, %CF88
-  %L41 = load i8, i8* %0
-  store i8 %L34, i8* %0
+  %L41 = load i8, ptr %0
+  store i8 %L34, ptr %0
   %E42 = extractelement <8 x i64> %Shuff13, i32 6
   %Shuff43 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 7>
   %I44 = insertelement <4 x i64> zeroinitializer, i64 116376, i32 3
@@ -92,8 +92,8 @@ CF81:                                             ; preds = %CF81, %CF85, %CF87,
   br i1 %Cmp47, label %CF81, label %CF85
 
 CF85:                                             ; preds = %CF81
-  %L48 = load i8, i8* %0
-  store i8 -101, i8* %0
+  %L48 = load i8, ptr %0
+  store i8 -101, ptr %0
   %E49 = extractelement <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i32 2
   %Shuff50 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
   %I51 = insertelement <4 x i64> zeroinitializer, i64 %E20, i32 3
@@ -101,8 +101,8 @@ CF85:                                             ; preds = %CF81
   %FC53 = uitofp i8 %L48 to double
   %Sl54 = select i1 %Cmp47, i32 %3, i32 %Sl24
   %Cmp55 = icmp ne <8 x i64> %Shuff13, zeroinitializer
-  %L56 = load i8, i8* %0
-  store i8 %L11, i8* %0
+  %L56 = load i8, ptr %0
+  store i8 %L11, ptr %0
   %E57 = extractelement <4 x i64> %Shuff21, i32 1
   %Shuff58 = shufflevector <8 x i64> %Shuff, <8 x i64> zeroinitializer, <8 x i32> <i32 4, i32 6, i32 undef, i32 10, i32 12, i32 undef, i32 0, i32 2>
   %I59 = insertelement <4 x i64> zeroinitializer, i64 %E42, i32 2
@@ -113,8 +113,8 @@ CF85:                                             ; preds = %CF81
 CF84:                                             ; preds = %CF84, %CF85
   %Sl62 = select i1 false, i8 %L, i8 %L48
   %Cmp63 = icmp ne <8 x i64> %I, zeroinitializer
-  %L64 = load i8, i8* %0
-  store i8 %5, i8* %0
+  %L64 = load i8, ptr %0
+  store i8 %5, ptr %0
   %E65 = extractelement <8 x i1> %Cmp55, i32 0
   br i1 %E65, label %CF84, label %CF87
 
@@ -125,8 +125,8 @@ CF87:                                             ; preds = %CF84
   %ZE69 = zext <8 x i8> %Sl32 to <8 x i64>
   %Sl70 = select i1 %Tr61, i64 %E20, i64 %E12
   %Cmp71 = icmp slt <8 x i64> %I, %Shuff
-  %L72 = load i8, i8* %0
-  store i8 %L72, i8* %0
+  %L72 = load i8, ptr %0
+  store i8 %L72, ptr %0
   %E73 = extractelement <8 x i1> %Cmp55, i32 6
   br i1 %E73, label %CF81, label %CF82
 
@@ -135,15 +135,15 @@ CF82:                                             ; preds = %CF82, %CF87
   %I75 = insertelement <4 x i64> zeroinitializer, i64 380809, i32 3
   %B76 = fsub double 0.000000e+00, %FC53
   %Tr77 = trunc i32 %E to i8
-  %Sl78 = select i1 %Cmp18, i64* %A2, i64* %2
+  %Sl78 = select i1 %Cmp18, ptr %A2, ptr %2
   %Cmp79 = icmp eq i32 394647, 492085
   br i1 %Cmp79, label %CF82, label %CF86
 
 CF86:                                             ; preds = %CF82
-  store i64 %Sl70, i64* %Sl78
-  store i64 %E57, i64* %Sl78
-  store i64 %Sl70, i64* %Sl78
-  store i64 %B, i64* %Sl78
-  store i64 %Sl10, i64* %Sl78
+  store i64 %Sl70, ptr %Sl78
+  store i64 %E57, ptr %Sl78
+  store i64 %Sl70, ptr %Sl78
+  store i64 %B, ptr %Sl78
+  store i64 %Sl10, ptr %Sl78
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
index 27f1eb1e9250b..ace3f54b33a37 100644
--- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
+++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
@@ -7,23 +7,23 @@
 ; "Type for zero vector elements is not legal" assertion.
 ; It should at least successfully build.
 
-define void @autogen_SD3926023935(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD3926023935(ptr, ptr, ptr, i32, i64, i8) {
 BB:
   %A4 = alloca i1
   %A3 = alloca float
   %A2 = alloca double
   %A1 = alloca float
   %A = alloca double
-  %L = load i8, i8* %0
-  store i8 -123, i8* %0
+  %L = load i8, ptr %0
+  store i8 -123, ptr %0
   %E = extractelement <4 x i64> zeroinitializer, i32 1
   %Shuff = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %I = insertelement <2 x i1> zeroinitializer, i1 false, i32 0
   %BC = bitcast i64 181325 to double
   %Sl = select i1 false, <2 x i32> zeroinitializer, <2 x i32> zeroinitializer
   %Cmp = icmp ne <4 x i64> zeroinitializer, zeroinitializer
-  %L5 = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L5 = load i8, ptr %0
+  store i8 %L, ptr %0
   %E6 = extractelement <4 x i64> zeroinitializer, i32 3
   %Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 2, i32 0>
   %I8 = insertelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i64 498254, i32 4
@@ -33,17 +33,16 @@ BB:
   br label %CF80
 
 CF80:                                             ; preds = %BB
-  %L11 = load i8, i8* %0
-  store i8 -123, i8* %0
+  %L11 = load i8, ptr %0
+  store i8 -123, ptr %0
   %E12 = extractelement <2 x i16> zeroinitializer, i32 1
   %Shuff13 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %I14 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 %B, i32 2
   %B15 = sdiv i64 334618, -1
-  %PC = bitcast i1* %A4 to i64*
   %Sl16 = select i1 %Cmp10, <4 x i32> zeroinitializer, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
   %Cmp17 = icmp ule <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %Sl16
-  %L18 = load double, double* %A2
-  store i64 498254, i64* %PC
+  %L18 = load double, ptr %A2
+  store i64 498254, ptr %A4
   %E19 = extractelement <4 x i64> zeroinitializer, i32 0
   %Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
   %I21 = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
@@ -51,8 +50,8 @@ CF80:                                             ; preds = %BB
   %ZE = zext <2 x i1> %Shuff20 to <2 x i32>
   %Sl23 = select i1 %Cmp10, <2 x i1> %Shuff20, <2 x i1> zeroinitializer
   %Cmp24 = icmp ult <2 x i32> zeroinitializer, zeroinitializer
-  %L25 = load i8, i8* %0
-  store i8 %L25, i8* %0
+  %L25 = load i8, ptr %0
+  store i8 %L25, ptr %0
   %E26 = extractelement <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, i32 3
   %Shuff27 = shufflevector <4 x i32> %Shuff, <4 x i32> %I14, <4 x i32> <i32 6, i32 0, i32 undef, i32 4>
   %I28 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 0
@@ -63,20 +62,19 @@ CF80:                                             ; preds = %BB
 CF79:                                             ; preds = %CF80
   %Sl30 = select i1 false, i8 %B29, i8 -123
   %Cmp31 = icmp sge <2 x i1> %I, %I
-  %L32 = load i64, i64* %PC
-  store i8 -123, i8* %0
+  %L32 = load i64, ptr %A4
+  store i8 -123, ptr %0
   %E33 = extractelement <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 2
   %Shuff34 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
   %I35 = insertelement <4 x i64> zeroinitializer, i64 498254, i32 3
   %B36 = sub <8 x i64> %I8, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
-  %PC37 = bitcast i8* %0 to i1*
   %Sl38 = select i1 %Cmp10, i8 -43, i8 %L5
   %Cmp39 = icmp eq i64 498254, %B15
   br label %CF
 
 CF:                                               ; preds = %CF, %CF79
-  %L40 = load double, double* %A
-  store i1 %Cmp39, i1* %PC37
+  %L40 = load double, ptr %A
+  store i1 %Cmp39, ptr %0
   %E41 = extractelement <4 x i64> zeroinitializer, i32 3
   %Shuff42 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %ZE, <2 x i32> <i32 2, i32 undef>
   %I43 = insertelement <4 x i32> %Shuff, i32 %3, i32 0
@@ -90,8 +88,8 @@ CF77:                                             ; preds = %CF77, %CF
   br i1 %Cmp46, label %CF77, label %CF78
 
 CF78:                                             ; preds = %CF78, %CF83, %CF82, %CF77
-  %L47 = load i64, i64* %PC
-  store i8 -123, i8* %0
+  %L47 = load i64, ptr %A4
+  store i8 -123, ptr %0
   %E48 = extractelement <4 x i64> zeroinitializer, i32 3
   %Shuff49 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
   %I50 = insertelement <2 x i1> zeroinitializer, i1 %Cmp10, i32 0
@@ -105,8 +103,8 @@ CF83:                                             ; preds = %CF78
   br i1 %Cmp54, label %CF78, label %CF82
 
 CF82:                                             ; preds = %CF83
-  %L55 = load i64, i64* %PC
-  store i64 %L32, i64* %PC
+  %L55 = load i64, ptr %A4
+  store i64 %L32, ptr %A4
   %E56 = extractelement <2 x i16> %Shuff7, i32 1
   %Shuff57 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
   %I58 = insertelement <2 x i32> %Sl, i32 %Tr52, i32 0
@@ -114,8 +112,8 @@ CF82:                                             ; preds = %CF83
   %FC = sitofp i64 498254 to double
   %Sl60 = select i1 false, i64 %E6, i64 -1
   %Cmp61 = icmp sgt <4 x i32> %Shuff27, %I43
-  %L62 = load i64, i64* %PC
-  store i64 %Sl9, i64* %PC
+  %L62 = load i64, ptr %A4
+  store i64 %Sl9, ptr %A4
   %E63 = extractelement <2 x i32> %ZE, i32 0
   %Shuff64 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> <i32 1, i32 3, i32 undef, i32 7>
   %I65 = insertelement <4 x i32> %Shuff, i32 %3, i32 3
@@ -126,18 +124,18 @@ CF82:                                             ; preds = %CF83
 
 CF81:                                             ; preds = %CF82
   %Cmp69 = icmp ne <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36
-  %L70 = load i8, i8* %0
-  store i64 %L55, i64* %PC
+  %L70 = load i8, ptr %0
+  store i64 %L55, ptr %A4
   %E71 = extractelement <4 x i32> %Shuff49, i32 1
   %Shuff72 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff34, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %I73 = insertelement <4 x i64> %Shuff64, i64 %E, i32 2
   %B74 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %B36
   %Sl75 = select i1 %Sl68, i64 %B51, i64 %L55
   %Cmp76 = icmp sgt <8 x i64> %B74, %B36
-  store i1 %Cmp39, i1* %PC37
-  store i64 %E41, i64* %PC
-  store i64 %L32, i64* %PC
-  store i64 %Sl75, i64* %2
-  store i64 %L32, i64* %PC
+  store i1 %Cmp39, ptr %0
+  store i64 %E41, ptr %A4
+  store i64 %L32, ptr %A4
+  store i64 %Sl75, ptr %2
+  store i64 %L32, ptr %A4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
index 9c8e4605eae8d..d88dfb6bd29ae 100644
--- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
+++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
@@ -7,35 +7,34 @@
 ; v4f32 on MSA.
 ; It should at least successfully build.
 
-define void @autogen_SD3997499501(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD3997499501(ptr, ptr, ptr, i32, i64, i8) {
 BB:
   %A4 = alloca <1 x double>
   %A3 = alloca double
   %A2 = alloca float
   %A1 = alloca double
   %A = alloca double
-  %L = load i8, i8* %0
-  store i8 97, i8* %0
+  %L = load i8, ptr %0
+  store i8 97, ptr %0
   %E = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14
   %Shuff = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 1, i32 3>
   %I = insertelement <4 x i64> zeroinitializer, i64 0, i32 3
   %Tr = trunc <1 x i64> zeroinitializer to <1 x i8>
-  %Sl = select i1 false, double* %A1, double* %A
+  %Sl = select i1 false, ptr %A1, ptr %A
   %Cmp = icmp ne <2 x i64> zeroinitializer, zeroinitializer
-  %L5 = load double, double* %Sl
-  store float -4.374162e+06, float* %A2
+  %L5 = load double, ptr %Sl
+  store float -4.374162e+06, ptr %A2
   %E6 = extractelement <4 x i64> zeroinitializer, i32 3
   %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I, <4 x i32> <i32 2, i32 4, i32 6, i32 undef>
   %I8 = insertelement <2 x i1> %Shuff, i1 false, i32 0
   %B = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1>
-  %PC = bitcast float* %A2 to float*
   %Sl9 = select i1 false, i32 82299, i32 0
   %Cmp10 = icmp slt i8 97, %5
   br label %CF72
 
 CF72:                                             ; preds = %CF72, %CF80, %CF78, %BB
-  %L11 = load double, double* %Sl
-  store double 0.000000e+00, double* %Sl
+  %L11 = load double, ptr %Sl
+  store double 0.000000e+00, ptr %Sl
   %E12 = extractelement <2 x i1> zeroinitializer, i32 0
   br i1 %E12, label %CF72, label %CF80
 
@@ -49,8 +48,8 @@ CF80:                                             ; preds = %CF72
   br i1 %Cmp17, label %CF72, label %CF77
 
 CF77:                                             ; preds = %CF77, %CF80
-  %L18 = load double, double* %Sl
-  store double 0.000000e+00, double* %Sl
+  %L18 = load double, ptr %Sl
+  store double 0.000000e+00, ptr %Sl
   %E19 = extractelement <2 x i1> zeroinitializer, i32 0
   br i1 %E19, label %CF77, label %CF78
 
@@ -60,8 +59,8 @@ CF78:                                             ; preds = %CF77
   %B22 = sdiv <4 x i64> %Shuff7, zeroinitializer
   %FC = uitofp i8 97 to double
   %Sl23 = select i1 %Cmp10, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer
-  %L24 = load double, double* %Sl
-  store float %Sl16, float* %PC
+  %L24 = load double, ptr %Sl
+  store float %Sl16, ptr %A2
   %E25 = extractelement <2 x i1> %Shuff, i32 1
   br i1 %E25, label %CF72, label %CF76
 
@@ -71,8 +70,8 @@ CF76:                                             ; preds = %CF78
   %B28 = mul <4 x i64> %I27, zeroinitializer
   %ZE = zext <8 x i1> zeroinitializer to <8 x i64>
   %Sl29 = select i1 %Cmp17, float -4.374162e+06, float -4.374162e+06
-  %L30 = load i8, i8* %0
-  store double %L5, double* %Sl
+  %L30 = load i8, ptr %0
+  store double %L5, ptr %Sl
   %E31 = extractelement <8 x i1> zeroinitializer, i32 5
   br label %CF
 
@@ -85,8 +84,8 @@ CF:                                               ; preds = %CF, %CF81, %CF76
   br i1 %Cmp36, label %CF, label %CF74
 
 CF74:                                             ; preds = %CF74, %CF
-  %L37 = load float, float* %PC
-  store double 0.000000e+00, double* %Sl
+  %L37 = load float, ptr %A2
+  store double 0.000000e+00, ptr %Sl
   %E38 = extractelement <2 x i1> %Sl23, i32 1
   br i1 %E38, label %CF74, label %CF75
 
@@ -95,8 +94,8 @@ CF75:                                             ; preds = %CF75, %CF82, %CF74
   %I40 = insertelement <4 x i64> zeroinitializer, i64 %4, i32 2
   %Sl41 = select i1 %Cmp10, i32 0, i32 %3
   %Cmp42 = icmp ne <1 x i64> zeroinitializer, zeroinitializer
-  %L43 = load double, double* %Sl
-  store i64 %4, i64* %2
+  %L43 = load double, ptr %Sl
+  store i64 %4, ptr %2
   %E44 = extractelement <2 x i1> %Shuff20, i32 1
   br i1 %E44, label %CF75, label %CF82
 
@@ -109,16 +108,16 @@ CF82:                                             ; preds = %CF75
   br i1 %Cmp49, label %CF75, label %CF81
 
 CF81:                                             ; preds = %CF82
-  %L50 = load i8, i8* %0
-  store double %L43, double* %Sl
+  %L50 = load i8, ptr %0
+  store double %L43, ptr %Sl
   %E51 = extractelement <4 x i64> %Shuff7, i32 3
   %Shuff52 = shufflevector <4 x float> %BC34, <4 x float> %BC34, <4 x i32> <i32 2, i32 4, i32 6, i32 0>
   %I53 = insertelement <2 x i1> %Cmp, i1 %E25, i32 0
   %B54 = fdiv double %L24, %L43
   %BC55 = bitcast <4 x i64> zeroinitializer to <4 x double>
   %Sl56 = select i1 false, i8 %5, i8 97
-  %L57 = load i8, i8* %0
-  store i8 %L50, i8* %0
+  %L57 = load i8, ptr %0
+  store i8 %L50, ptr %0
   %E58 = extractelement <2 x i1> %Shuff20, i32 1
   br i1 %E58, label %CF, label %CF73
 
@@ -126,11 +125,10 @@ CF73:                                             ; preds = %CF73, %CF81
   %Shuff59 = shufflevector <2 x i1> %Shuff13, <2 x i1> %Shuff45, <2 x i32> <i32 undef, i32 0>
   %I60 = insertelement <4 x float> %Shuff52, float -4.374162e+06, i32 0
   %B61 = mul <4 x i64> %I46, zeroinitializer
-  %PC62 = bitcast double* %A3 to float*
   %Sl63 = select i1 %Cmp10, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer
   %Cmp64 = icmp ne <2 x i1> %Cmp, %Shuff
-  %L65 = load double, double* %A1
-  store float -4.374162e+06, float* %PC62
+  %L65 = load double, ptr %A1
+  store float -4.374162e+06, ptr %A3
   %E66 = extractelement <8 x i1> %I21, i32 3
   br i1 %E66, label %CF73, label %CF79
 
@@ -143,10 +141,10 @@ CF79:                                             ; preds = %CF79, %CF73
   br i1 %Cmp71, label %CF79, label %CF83
 
 CF83:                                             ; preds = %CF79
-  store double 0.000000e+00, double* %Sl
-  store float %BC, float* %PC62
-  store double %Sl48, double* %Sl
-  store double %FC, double* %Sl
-  store float %BC, float* %PC62
+  store double 0.000000e+00, ptr %Sl
+  store float %BC, ptr %A3
+  store double %Sl48, ptr %Sl
+  store double %FC, ptr %Sl
+  store float %BC, ptr %A3
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
index 0c6b1174bb7cd..0185c8d3eb336 100644
--- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
+++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
@@ -7,32 +7,32 @@
 ; `Num < NumOperands && "Invalid child # of SDNode!"' assertion.
 ; It should at least successfully build.
 
-define void @autogen_SD525530439(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD525530439(ptr, ptr, ptr, i32, i64, i8) {
 BB:
   %A4 = alloca i32
   %A3 = alloca double
   %A2 = alloca <1 x double>
   %A1 = alloca <8 x double>
   %A = alloca i64
-  %L = load i8, i8* %0
-  store i64 33695, i64* %A
+  %L = load i8, ptr %0
+  store i64 33695, ptr %A
   %E = extractelement <4 x i32> zeroinitializer, i32 3
   %Shuff = shufflevector <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 2, i32 0>
   %I = insertelement <4 x i16> zeroinitializer, i16 -11642, i32 0
   %B = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
   %ZE = fpext float 0x3B64A2B880000000 to double
   %Sl = select i1 true, i16 -1, i16 -11642
-  %L5 = load i8, i8* %0
-  store i8 0, i8* %0
+  %L5 = load i8, ptr %0
+  store i8 0, ptr %0
   %E6 = extractelement <4 x i32> zeroinitializer, i32 2
   %Shuff7 = shufflevector <8 x i1> zeroinitializer, <8 x i1> zeroinitializer, <8 x i32> <i32 undef, i32 7, i32 9, i32 11, i32 13, i32 15, i32 1, i32 undef>
   %I8 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 3
   %B9 = sub i32 71140, 439732
   %BC = bitcast <2 x i32> <i32 -1, i32 -1> to <2 x float>
-  %Sl10 = select i1 true, i32* %1, i32* %1
+  %Sl10 = select i1 true, ptr %1, ptr %1
   %Cmp = icmp sge <8 x i64> zeroinitializer, zeroinitializer
-  %L11 = load i32, i32* %Sl10
-  store <1 x double> zeroinitializer, <1 x double>* %A2
+  %L11 = load i32, ptr %Sl10
+  store <1 x double> zeroinitializer, ptr %A2
   %E12 = extractelement <4 x i16> zeroinitializer, i32 0
   %Shuff13 = shufflevector <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i32> undef
   %I14 = insertelement <1 x i16> zeroinitializer, i16 %Sl, i32 0
@@ -43,8 +43,8 @@ BB:
   br label %CF75
 
 CF75:                                             ; preds = %CF75, %BB
-  %L19 = load i32, i32* %Sl10
-  store i32 %L11, i32* %Sl10
+  %L19 = load i32, ptr %Sl10
+  store i32 %L11, ptr %Sl10
   %E20 = extractelement <4 x i32> zeroinitializer, i32 1
   %Shuff21 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %I8, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
   %I22 = insertelement <4 x float> %BC16, float 0x3EEF3D6300000000, i32 2
@@ -55,8 +55,8 @@ CF75:                                             ; preds = %CF75, %BB
   br i1 %Cmp26, label %CF75, label %CF76
 
 CF76:                                             ; preds = %CF75
-  %L27 = load i32, i32* %Sl10
-  store i32 439732, i32* %Sl10
+  %L27 = load i32, ptr %Sl10
+  store i32 439732, ptr %Sl10
   %E28 = extractelement <4 x i32> %Shuff21, i32 3
   %Shuff29 = shufflevector <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0>
   %I30 = insertelement <8 x i1> %Shuff7, i1 %Cmp18, i32 4
@@ -65,8 +65,8 @@ CF76:                                             ; preds = %CF75
   br label %CF74
 
 CF74:                                             ; preds = %CF74, %CF80, %CF78, %CF76
-  %L33 = load i64, i64* %2
-  store i32 71140, i32* %Sl10
+  %L33 = load i64, ptr %2
+  store i32 71140, ptr %Sl10
   %E34 = extractelement <4 x i32> zeroinitializer, i32 1
   %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> undef
   %I36 = insertelement <4 x i16> zeroinitializer, i16 -11642, i32 0
@@ -76,8 +76,8 @@ CF74:                                             ; preds = %CF74, %CF80, %CF78,
   br i1 %Cmp39, label %CF74, label %CF80
 
 CF80:                                             ; preds = %CF74
-  %L40 = load i8, i8* %0
-  store i32 0, i32* %Sl10
+  %L40 = load i8, ptr %0
+  store i32 0, ptr %Sl10
   %E41 = extractelement <8 x i64> zeroinitializer, i32 1
   %Shuff42 = shufflevector <1 x i16> %I14, <1 x i16> %I14, <1 x i32> undef
   %I43 = insertelement <4 x i16> %I36, i16 -11642, i32 0
@@ -86,8 +86,8 @@ CF80:                                             ; preds = %CF74
   br i1 %Sl44, label %CF74, label %CF78
 
 CF78:                                             ; preds = %CF80
-  %L45 = load i32, i32* %Sl10
-  store i8 %L5, i8* %0
+  %L45 = load i32, ptr %Sl10
+  store i8 %L5, ptr %0
   %E46 = extractelement <8 x i1> %Shuff7, i32 2
   br i1 %E46, label %CF74, label %CF77
 
@@ -101,28 +101,27 @@ CF77:                                             ; preds = %CF77, %CF78
   br i1 %Cmp52, label %CF77, label %CF79
 
 CF79:                                             ; preds = %CF77
-  %L53 = load i32, i32* %Sl10
-  store i8 %L40, i8* %0
+  %L53 = load i32, ptr %Sl10
+  store i8 %L40, ptr %0
   %E54 = extractelement <4 x i32> zeroinitializer, i32 1
   %Shuff55 = shufflevector <4 x i32> %Shuff21, <4 x i32> %I8, <4 x i32> <i32 4, i32 6, i32 undef, i32 2>
   %I56 = insertelement <4 x i32> zeroinitializer, i32 %Sl51, i32 2
   %Tr = trunc <1 x i64> %Shuff13 to <1 x i16>
   %Sl57 = select i1 %Cmp18, <2 x i32> <i32 -1, i32 -1>, <2 x i32> <i32 -1, i32 -1>
   %Cmp58 = icmp uge <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %I56
-  %L59 = load i8, i8* %0
-  store <1 x double> zeroinitializer, <1 x double>* %A2
+  %L59 = load i8, ptr %0
+  store <1 x double> zeroinitializer, ptr %A2
   %E60 = extractelement <4 x i32> zeroinitializer, i32 0
   %Shuff61 = shufflevector <4 x i32> %I8, <4 x i32> %I8, <4 x i32> <i32 undef, i32 1, i32 undef, i32 undef>
   %I62 = insertelement <4 x i16> zeroinitializer, i16 %E12, i32 1
   %B63 = and <4 x i32> %Shuff61, <i32 -1, i32 -1, i32 -1, i32 -1>
-  %PC = bitcast double* %A3 to i32*
   %Sl64 = select i1 %Cmp18, <4 x i32> %Shuff61, <4 x i32> %Shuff55
   %Cmp65 = icmp sgt i32 439732, %3
   br label %CF
 
 CF:                                               ; preds = %CF79
-  %L66 = load i32, i32* %Sl10
-  store i32 %E6, i32* %PC
+  %L66 = load i32, ptr %Sl10
+  store i32 %E6, ptr %A3
   %E67 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2
   %Shuff68 = shufflevector <4 x i32> %Sl64, <4 x i32> %I8, <4 x i32> <i32 5, i32 undef, i32 1, i32 undef>
   %I69 = insertelement <4 x i16> %Shuff47, i16 %Sl, i32 3
@@ -130,10 +129,10 @@ CF:                                               ; preds = %CF79
   %FC71 = sitofp i32 %L66 to double
   %Sl72 = select i1 %Cmp18, i64 %4, i64 %4
   %Cmp73 = icmp eq <4 x i64> zeroinitializer, %B70
-  store i32 %B23, i32* %PC
-  store i32 %3, i32* %PC
-  store i32 %3, i32* %Sl10
-  store i32 %L27, i32* %1
-  store i32 0, i32* %PC
+  store i32 %B23, ptr %A3
+  store i32 %3, ptr %A3
+  store i32 %3, ptr %Sl10
+  store i32 %L27, ptr %1
+  store i32 0, ptr %A3
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
index c9ead854d9c56..ae02778783e36 100644
--- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
+++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
@@ -7,22 +7,22 @@
 ; v2f64 on MSA.
 ; It should at least successfully build.
 
-define void @autogen_SD997348632(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD997348632(ptr, ptr, ptr, i32, i64, i8) {
 BB:
   %A4 = alloca <2 x i32>
   %A3 = alloca <16 x i16>
   %A2 = alloca <4 x i1>
   %A1 = alloca <4 x i16>
   %A = alloca <2 x i32>
-  %L = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L = load i8, ptr %0
+  store i8 %L, ptr %0
   %E = extractelement <4 x i32> zeroinitializer, i32 0
   %Shuff = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 1, i32 3, i32 5>
   %I = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
   %FC = sitofp <4 x i32> zeroinitializer to <4 x double>
   %Sl = select i1 false, <4 x i64> %Shuff, <4 x i64> %Shuff
-  %L5 = load i8, i8* %0
-  store i8 %5, i8* %0
+  %L5 = load i8, ptr %0
+  store i8 %5, ptr %0
   %E6 = extractelement <1 x i16> zeroinitializer, i32 0
   %Shuff7 = shufflevector <2 x i1> %I, <2 x i1> %I, <2 x i32> <i32 1, i32 undef>
   %I8 = insertelement <1 x i16> zeroinitializer, i16 0, i32 0
@@ -30,8 +30,8 @@ BB:
   %FC9 = fptoui float 0x406DB70180000000 to i64
   %Sl10 = select i1 false, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
   %Cmp = icmp ult <4 x i64> zeroinitializer, zeroinitializer
-  %L11 = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L11 = load i8, ptr %0
+  store i8 %L, ptr %0
   %E12 = extractelement <4 x i64> zeroinitializer, i32 2
   %Shuff13 = shufflevector <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 undef, i32 3>
   %I14 = insertelement <8 x i32> zeroinitializer, i32 -1, i32 7
@@ -42,8 +42,8 @@ BB:
   br label %CF
 
 CF:                                               ; preds = %CF, %CF79, %CF84, %BB
-  %L18 = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L18 = load i8, ptr %0
+  store i8 %L, ptr %0
   %E19 = extractelement <4 x i64> %Sl, i32 3
   %Shuff20 = shufflevector <2 x i1> %Shuff7, <2 x i1> %I, <2 x i32> <i32 2, i32 0>
   %I21 = insertelement <4 x i64> zeroinitializer, i64 %FC9, i32 0
@@ -54,8 +54,8 @@ CF:                                               ; preds = %CF, %CF79, %CF84, %
   br i1 %Cmp25, label %CF, label %CF79
 
 CF79:                                             ; preds = %CF
-  %L26 = load i8, i8* %0
-  store i8 %L26, i8* %0
+  %L26 = load i8, ptr %0
+  store i8 %L26, ptr %0
   %E27 = extractelement <1 x i16> zeroinitializer, i32 0
   %Shuff28 = shufflevector <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11>
   %I29 = insertelement <16 x i32> %Shuff28, i32 %B, i32 15
@@ -65,8 +65,8 @@ CF79:                                             ; preds = %CF
   br i1 %Cmp32, label %CF, label %CF78
 
 CF78:                                             ; preds = %CF78, %CF79
-  %L33 = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L33 = load i8, ptr %0
+  store i8 %L, ptr %0
   %E34 = extractelement <16 x i32> %Shuff28, i32 1
   %Shuff35 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I21, <4 x i32> <i32 undef, i32 6, i32 0, i32 2>
   %I36 = insertelement <4 x double> %FC, double 0xA4A57F449CA36CC2, i32 2
@@ -76,8 +76,8 @@ CF78:                                             ; preds = %CF78, %CF79
   br i1 %Cmp38, label %CF78, label %CF80
 
 CF80:                                             ; preds = %CF80, %CF82, %CF78
-  %L39 = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L39 = load i8, ptr %0
+  store i8 %L, ptr %0
   %E40 = extractelement <2 x i1> %Shuff20, i32 1
   br i1 %E40, label %CF80, label %CF82
 
@@ -87,8 +87,8 @@ CF82:                                             ; preds = %CF80
   %B43 = sub i32 %E, 0
   %Sl44 = select i1 %Cmp32, <16 x i32> %Shuff28, <16 x i32> %Shuff28
   %Cmp45 = icmp sgt <4 x i64> zeroinitializer, %I21
-  %L46 = load i8, i8* %0
-  store i8 %L11, i8* %0
+  %L46 = load i8, ptr %0
+  store i8 %L11, ptr %0
   %E47 = extractelement <8 x i32> %Sl16, i32 4
   %Shuff48 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Shuff7, <2 x i32> <i32 undef, i32 1>
   %I49 = insertelement <2 x i1> %Shuff48, i1 %Cmp17, i32 1
@@ -99,8 +99,8 @@ CF82:                                             ; preds = %CF80
 CF81:                                             ; preds = %CF81, %CF82
   %Sl52 = select i1 false, float -6.749110e+06, float 0x406DB70180000000
   %Cmp53 = icmp uge <2 x i32> <i32 -1, i32 -1>, <i32 -1, i32 -1>
-  %L54 = load i8, i8* %0
-  store i8 %L5, i8* %0
+  %L54 = load i8, ptr %0
+  store i8 %L5, ptr %0
   %E55 = extractelement <8 x i32> zeroinitializer, i32 7
   %Shuff56 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 undef, i32 4, i32 6, i32 0>
   %I57 = insertelement <2 x i1> %Shuff7, i1 false, i32 0
@@ -108,8 +108,8 @@ CF81:                                             ; preds = %CF81, %CF82
   %FC59 = fptoui <4 x double> %I36 to <4 x i16>
   %Sl60 = select i1 %Cmp17, <2 x i1> %I, <2 x i1> %I57
   %Cmp61 = icmp ule <8 x i32> %B50, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
-  %L62 = load i8, i8* %0
-  store i8 %L33, i8* %0
+  %L62 = load i8, ptr %0
+  store i8 %L33, ptr %0
   %E63 = extractelement <4 x i64> %Shuff, i32 2
   %Shuff64 = shufflevector <4 x i64> %Shuff56, <4 x i64> %Shuff56, <4 x i32> <i32 5, i32 7, i32 1, i32 undef>
   %I65 = insertelement <2 x i1> zeroinitializer, i1 false, i32 1
@@ -126,18 +126,18 @@ CF84:                                             ; preds = %CF83
   br i1 %Cmp69, label %CF, label %CF77
 
 CF77:                                             ; preds = %CF84
-  %L70 = load i8, i8* %0
-  store i8 %L, i8* %0
+  %L70 = load i8, ptr %0
+  store i8 %L, ptr %0
   %E71 = extractelement <4 x i64> %Shuff, i32 0
   %Shuff72 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> <i32 3, i32 1>
   %I73 = insertelement <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, i32 %B66, i32 1
   %FC74 = uitofp i1 %Cmp32 to double
   %Sl75 = select i1 %FC51, i16 9704, i16 0
   %Cmp76 = icmp ugt <1 x i16> %I8, %I8
-  store i8 %L39, i8* %0
-  store i8 %5, i8* %0
-  store i8 %Tr23, i8* %0
-  store i8 %L, i8* %0
-  store i8 %5, i8* %0
+  store i8 %L39, ptr %0
+  store i8 %5, ptr %0
+  store i8 %Tr23, ptr %0
+  store i8 %L, ptr %0
+  store i8 %5, ptr %0
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll
index 10fa30d8cb175..16900fbf48385 100644
--- a/llvm/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll
+++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-sz1-s742806235.ll
@@ -7,17 +7,17 @@
 ; build_vector.
 ; It should at least successfully build.
 
-define void @autogen_SD742806235(i8*, i32*, i64*, i32, i64, i8) {
+define void @autogen_SD742806235(ptr, ptr, ptr, i32, i64, i8) {
 BB:
   %A4 = alloca double
   %A3 = alloca double
   %A2 = alloca <8 x i8>
   %A1 = alloca <4 x float>
   %A = alloca i1
-  store i8 %5, i8* %0
-  store i8 %5, i8* %0
-  store i8 %5, i8* %0
-  store <8 x i8> <i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1>, <8 x i8>* %A2
-  store i8 %5, i8* %0
+  store i8 %5, ptr %0
+  store i8 %5, ptr %0
+  store i8 %5, ptr %0
+  store <8 x i8> <i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1, i8 0, i8 -1>, ptr %A2
+  store i8 %5, ptr %0
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/msa/msa-nooddspreg.ll b/llvm/test/CodeGen/Mips/msa/msa-nooddspreg.ll
index 7cfc66650e6be..3071a358d6dc1 100644
--- a/llvm/test/CodeGen/Mips/msa/msa-nooddspreg.ll
+++ b/llvm/test/CodeGen/Mips/msa/msa-nooddspreg.ll
@@ -16,7 +16,7 @@ define void @test() {
 entry:
 ; CHECK-NOT: lwc1 $f{{[13579]+}}
 ; CHECK: lwc1 $f{{[02468]+}}
-  %0 = load float, float * @f1
+  %0 = load float, ptr @f1
   %1 = insertelement <4 x float> undef,    float %0, i32 0
   %2 = insertelement <4 x float> %1,    float %0, i32 1
   %3 = insertelement <4 x float> %2,    float %0, i32 2
@@ -24,14 +24,14 @@ entry:
 
 ; CHECK-NOT: lwc1 $f{{[13579]+}}
 ; CHECK: lwc1 $f{{[02468]+}}
-  %5 = load float, float * @f2
+  %5 = load float, ptr @f2
   %6 = insertelement <4 x float> undef,    float %5, i32 0
   %7 = insertelement <4 x float> %6,    float %5, i32 1
   %8 = insertelement <4 x float> %7,    float %5, i32 2
   %9 = insertelement <4 x float> %8,    float %5, i32 3
 
   %10 = fadd <4 x float> %4, %9
-  store <4 x float> %10, <4 x float> * @v3
+  store <4 x float> %10, ptr @v3
   ret void
 }
 
@@ -43,13 +43,13 @@ define void @test2() {
 entry:
 ; CHECK-NOT: lwc1 $f{{[13579]+}}
 ; CHECK: lwc1 $f{{[02468]+}}
-  %0 = load float, float * @f1
+  %0 = load float, ptr @f1
   %1 = fpext float %0 to double
 ; CHECK-NOT: lwc1 $f{{[13579]+}}
 ; CHECK: lwc1 $f{{[02468]+}}
-  %2 = load float, float * @f2
+  %2 = load float, ptr @f2
   %3 = fpext float %2 to double
   %4 = fadd double %1, %3
-  store double%4, double * @d1
+  store double %4, ptr @d1
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/msa/shift-dagcombine.ll b/llvm/test/CodeGen/Mips/msa/shift-dagcombine.ll
index 1a86a6284a003..1f8572751c1a4 100644
--- a/llvm/test/CodeGen/Mips/msa/shift-dagcombine.ll
+++ b/llvm/test/CodeGen/Mips/msa/shift-dagcombine.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 
-define void @ashr_v4i32(<4 x i32>* %c) nounwind {
+define void @ashr_v4i32(ptr %c) nounwind {
   ; CHECK-LABEL: ashr_v4i32:
 
   %1 = ashr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
@@ -8,7 +8,7 @@ define void @ashr_v4i32(<4 x i32>* %c) nounwind {
   ; CHECK-NOT: sra
   ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
   ; CHECK-NOT: sra
-  store volatile <4 x i32> %1, <4 x i32>* %c
+  store volatile <4 x i32> %1, ptr %c
   ; CHECK-DAG: st.w [[R1]], 0($4)
 
   %2 = ashr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
@@ -16,14 +16,14 @@ define void @ashr_v4i32(<4 x i32>* %c) nounwind {
   ; CHECK-NOT: sra
   ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -2
   ; CHECK-NOT: sra
-  store volatile <4 x i32> %2, <4 x i32>* %c
+  store volatile <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R1]], 0($4)
 
   ret void
   ; CHECK-LABEL: .size ashr_v4i32
 }
 
-define void @lshr_v4i32(<4 x i32>* %c) nounwind {
+define void @lshr_v4i32(ptr %c) nounwind {
   ; CHECK-LABEL: lshr_v4i32:
 
   %1 = lshr <4 x i32> <i32 1, i32 2, i32 4, i32 8>,
@@ -31,7 +31,7 @@ define void @lshr_v4i32(<4 x i32>* %c) nounwind {
   ; CHECK-NOT: srl
   ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 1
   ; CHECK-NOT: srl
-  store volatile <4 x i32> %1, <4 x i32>* %c
+  store volatile <4 x i32> %1, ptr %c
   ; CHECK-DAG: st.w [[R1]], 0($4)
 
   %2 = lshr <4 x i32> <i32 -2, i32 -4, i32 -8, i32 -16>,
@@ -40,14 +40,14 @@ define void @lshr_v4i32(<4 x i32>* %c) nounwind {
   ; CHECK-DAG: addiu [[CPOOL:\$[0-9]+]], {{.*}}, %lo($
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0([[CPOOL]])
   ; CHECK-NOT: srl
-  store volatile <4 x i32> %2, <4 x i32>* %c
+  store volatile <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R1]], 0($4)
 
   ret void
   ; CHECK-LABEL: .size lshr_v4i32
 }
 
-define void @shl_v4i32(<4 x i32>* %c) nounwind {
+define void @shl_v4i32(ptr %c) nounwind {
   ; CHECK-LABEL: shl_v4i32:
 
   %1 = shl <4 x i32> <i32 8, i32 4, i32 2, i32 1>,
@@ -55,7 +55,7 @@ define void @shl_v4i32(<4 x i32>* %c) nounwind {
   ; CHECK-NOT: sll
   ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], 8
   ; CHECK-NOT: sll
-  store volatile <4 x i32> %1, <4 x i32>* %c
+  store volatile <4 x i32> %1, ptr %c
   ; CHECK-DAG: st.w [[R1]], 0($4)
 
   %2 = shl <4 x i32> <i32 -8, i32 -4, i32 -2, i32 -1>,
@@ -63,7 +63,7 @@ define void @shl_v4i32(<4 x i32>* %c) nounwind {
   ; CHECK-NOT: sll
   ; CHECK-DAG: ldi.w [[R1:\$w[0-9]+]], -8
   ; CHECK-NOT: sll
-  store volatile <4 x i32> %2, <4 x i32>* %c
+  store volatile <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R1]], 0($4)
 
   ret void

diff --git a/llvm/test/CodeGen/Mips/msa/shift_constant_pool.ll b/llvm/test/CodeGen/Mips/msa/shift_constant_pool.ll
index 186616fa3be15..9312a05f56960 100644
--- a/llvm/test/CodeGen/Mips/msa/shift_constant_pool.ll
+++ b/llvm/test/CodeGen/Mips/msa/shift_constant_pool.ll
@@ -15,7 +15,7 @@
 define void @llvm_mips_bclr_w_test_const_vec() nounwind {
 entry:
   %0 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> <i32 2147483649, i32 2147483649, i32 7, i32 7>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>)
-  store <4 x i32> %0, <4 x i32>* @llvm_mips_bclr_w_test_const_vec_res
+  store <4 x i32> %0, ptr @llvm_mips_bclr_w_test_const_vec_res
   ret void
 }
 
@@ -43,7 +43,7 @@ declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_bneg_w_test_const_vec() nounwind {
 entry:
   %0 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> <i32 2147483649, i32 2147483649, i32 7, i32 7>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>)
-  store <4 x i32> %0, <4 x i32>* @llvm_mips_bneg_w_test_const_vec_res
+  store <4 x i32> %0, ptr @llvm_mips_bneg_w_test_const_vec_res
   ret void
 }
 
@@ -71,7 +71,7 @@ declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_bset_w_test_const_vec() nounwind {
 entry:
   %0 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>)
-  store <4 x i32> %0, <4 x i32>* @llvm_mips_bset_w_test_const_vec_res
+  store <4 x i32> %0, ptr @llvm_mips_bset_w_test_const_vec_res
   ret void
 }
 
@@ -98,7 +98,7 @@ declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_sll_w_test_const_vec() nounwind {
 entry:
   %0 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 -1, i32 31, i32 2, i32 34>)
-  store <4 x i32> %0, <4 x i32>* @llvm_mips_sll_w_test_const_vec_res
+  store <4 x i32> %0, ptr @llvm_mips_sll_w_test_const_vec_res
   ret void
 }
 
@@ -125,7 +125,7 @@ declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_sra_w_test_const_vec() nounwind {
 entry:
   %0 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> <i32 -16, i32 16, i32 16, i32 16>, <4 x i32> <i32 2, i32 -30, i32 33, i32 1>)
-  store <4 x i32> %0, <4 x i32>* @llvm_mips_sra_w_test_const_vec_res
+  store <4 x i32> %0, ptr @llvm_mips_sra_w_test_const_vec_res
   ret void
 }
 
@@ -152,7 +152,7 @@ declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_srl_w_test_const_vec() nounwind {
 entry:
   %0 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> <i32 -16, i32 16, i32 16, i32 16>, <4 x i32> <i32 2, i32 -30, i32 33, i32 1>)
-  store <4 x i32> %0, <4 x i32>* @llvm_mips_srl_w_test_const_vec_res
+  store <4 x i32> %0, ptr @llvm_mips_srl_w_test_const_vec_res
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/shift_no_and.ll b/llvm/test/CodeGen/Mips/msa/shift_no_and.ll
index 172763bb64d1d..d975f43e0c06b 100644
--- a/llvm/test/CodeGen/Mips/msa/shift_no_and.ll
+++ b/llvm/test/CodeGen/Mips/msa/shift_no_and.ll
@@ -9,10 +9,10 @@
 
 define void @llvm_mips_bclr_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bclr_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bclr_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_bclr_b_RES
   ret void
 }
 
@@ -28,10 +28,10 @@ declare <16 x i8> @llvm.mips.bclr.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_bclr_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_bclr_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_bclr_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_bclr_h_RES
   ret void
 }
 
@@ -47,10 +47,10 @@ declare <8 x i16> @llvm.mips.bclr.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_bclr_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_bclr_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_bclr_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_bclr_w_RES
   ret void
 }
 
@@ -66,10 +66,10 @@ declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_bclr_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_bclr_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_bclr_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_bclr_d_RES
   ret void
 }
 
@@ -85,10 +85,10 @@ declare <2 x i64> @llvm.mips.bclr.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_bneg_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bneg_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bneg_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_bneg_b_RES
   ret void
 }
 
@@ -104,10 +104,10 @@ declare <16 x i8> @llvm.mips.bneg.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_bneg_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_bneg_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_bneg_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_bneg_h_RES
   ret void
 }
 
@@ -123,10 +123,10 @@ declare <8 x i16> @llvm.mips.bneg.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_bneg_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_bneg_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_bneg_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_bneg_w_RES
   ret void
 }
 
@@ -142,10 +142,10 @@ declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_bneg_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_bneg_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_bneg_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_bneg_d_RES
   ret void
 }
 
@@ -161,10 +161,10 @@ declare <2 x i64> @llvm.mips.bneg.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_bset_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_bset_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bset_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_bset_b_RES
   ret void
 }
 
@@ -180,10 +180,10 @@ declare <16 x i8> @llvm.mips.bset.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_bset_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_bset_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_bset_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_bset_h_RES
   ret void
 }
 
@@ -199,10 +199,10 @@ declare <8 x i16> @llvm.mips.bset.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_bset_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_bset_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_bset_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_bset_w_RES
   ret void
 }
 
@@ -218,10 +218,10 @@ declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_bset_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_bset_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_bset_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_bset_d_RES
   ret void
 }
 
@@ -237,10 +237,10 @@ declare <2 x i64> @llvm.mips.bset.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_sll_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_sll_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_sll_b_RES
   ret void
 }
 
@@ -256,10 +256,10 @@ declare <16 x i8> @llvm.mips.sll.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_sll_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_sll_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_sll_h_RES
   ret void
 }
 
@@ -275,10 +275,10 @@ declare <8 x i16> @llvm.mips.sll.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_sll_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_sll_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_sll_w_RES
   ret void
 }
 
@@ -294,10 +294,10 @@ declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_sll_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_sll_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_sll_d_RES
   ret void
 }
 
@@ -313,10 +313,10 @@ declare <2 x i64> @llvm.mips.sll.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_sra_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_sra_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_sra_b_RES
   ret void
 }
 
@@ -332,10 +332,10 @@ declare <16 x i8> @llvm.mips.sra.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_sra_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_sra_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_sra_h_RES
   ret void
 }
 
@@ -351,10 +351,10 @@ declare <8 x i16> @llvm.mips.sra.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_sra_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_sra_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_sra_w_RES
   ret void
 }
 
@@ -370,10 +370,10 @@ declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_sra_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_sra_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_sra_d_RES
   ret void
 }
 
@@ -389,10 +389,10 @@ declare <2 x i64> @llvm.mips.sra.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_srl_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_srl_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_srl_b_RES
   ret void
 }
 
@@ -408,10 +408,10 @@ declare <16 x i8> @llvm.mips.srl.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_srl_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_srl_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_srl_h_RES
   ret void
 }
 
@@ -427,10 +427,10 @@ declare <8 x i16> @llvm.mips.srl.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_srl_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_srl_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_srl_w_RES
   ret void
 }
 
@@ -446,10 +446,10 @@ declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_srl_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_srl_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_srl_d_RES
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/msa/shuffle.ll b/llvm/test/CodeGen/Mips/msa/shuffle.ll
index 240606751cc8a..61b42f5df441b 100644
--- a/llvm/test/CodeGen/Mips/msa/shuffle.ll
+++ b/llvm/test/CodeGen/Mips/msa/shuffle.ll
@@ -1,56 +1,56 @@
 ; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 ; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 
-define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v16i8_0:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
   ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]])
   ; CHECK-DAG: vshf.b [[R3]], [[R1]], [[R1]]
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v16i8_1:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v16i8_2:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 16>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
   ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0([[PTR_A]])
   ; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R2]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_3(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v16i8_3:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 17, i32 24, i32 25, i32 18, i32 19, i32 20, i32 28, i32 19, i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -58,75 +58,75 @@ define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
   ; The concatenation step of vshf is bitwise, not vectorwise, so we must reverse
   ; the operands to get the right answer.
   ; CHECK-DAG: vshf.b [[R3]], [[R2]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @vshf_v16i8_4(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v16i8_4:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17, i32 1, i32 17>
   ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1]
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v8i16_0:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
   ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]])
   ; CHECK-DAG: vshf.h [[R3]], [[R1]], [[R1]]
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v8i16_1:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v8i16_2:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 8>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
   ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0([[PTR_A]])
   ; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R2]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_3(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v8i16_3:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 8, i32 9, i32 2, i32 3, i32 4, i32 12, i32 3>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -134,20 +134,20 @@ define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
   ; The concatenation step of vshf is bitwise, not vectorwise, so we must reverse
   ; the operands to get the right answer.
   ; CHECK-DAG: vshf.h [[R3]], [[R2]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @vshf_v8i16_4(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v8i16_4:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <8 x i16> %1, <8 x i16> %1, <8 x i32> <i32 1, i32 9, i32 1, i32 9, i32 1, i32 9, i32 1, i32 9>
   ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1]
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
@@ -156,52 +156,52 @@ define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
 ; Note: v4i32 only has one 4-element set so it's impossible to get a vshf.w
 ; instruction when using a single vector.
 
-define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v4i32_0:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v4i32_1:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: splati.w [[R3:\$w[0-9]+]], [[R1]][1]
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v4i32_2:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 5, i32 6, i32 4>
   ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R2]], 36
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_3(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v4i32_3:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 6, i32 4>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -209,76 +209,76 @@ define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
   ; The concatenation step of vshf is bitwise, not vectorwise, so we must reverse
   ; the operands to get the right answer.
   ; CHECK-DAG: vshf.w [[R3]], [[R2]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @vshf_v4i32_4(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v4i32_4:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 5, i32 5, i32 1>
   ; The two operand vectors are the same, so elements 1 and 5 are equivalent.
   ; CHECK-DAG: splati.w [[R3:\$w[0-9]+]], [[R1]][1]
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v2i64_0:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]])
   ; CHECK-DAG: vshf.d [[R3]], [[R1]], [[R1]]
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v2i64_1:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v2i64_2:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 2>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0([[PTR_A]])
   ; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R2]]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_3(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v2i64_3:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 2>
   ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($
@@ -286,59 +286,59 @@ define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
   ; The concatenation step of vshf is bitwise, not vectorwise, so we must reverse
   ; the operands to get the right answer.
   ; CHECK-DAG: vshf.d [[R3]], [[R2]], [[R1]]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @vshf_v2i64_4(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: vshf_v2i64_4:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <2 x i64> %1, <2 x i64> %1, <2 x i32> <i32 1, i32 3>
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @shf_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: shf_v16i8_0:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> <i32 1, i32 3, i32 2, i32 0, i32 5, i32 7, i32 6, i32 4, i32 9, i32 11, i32 10, i32 8, i32 13, i32 15, i32 14, i32 12>
   ; CHECK-DAG: shf.b [[R3:\$w[0-9]+]], [[R1]], 45
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @shf_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: shf_v8i16_0:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
   ; CHECK-DAG: shf.h [[R3:\$w[0-9]+]], [[R1]], 27
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @shf_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: shf_v4i32_0:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
@@ -346,1121 +346,1121 @@ define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 
 ; shf.d does not exist
 
-define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvev_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v16i8_0:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
   ; CHECK-DAG: ilvev.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvev_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v8i16_0:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
   ; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvev_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v4i32_0:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
   ; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvev_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v2i64_0:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
   ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
 ; Interleaving one operand with itself.
-define void @ilvev_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvev_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v16i8_1:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
   ; CHECK-DAG: ilvev.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvev_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v8i16_1:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
   ; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvev_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v4i32_1:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
   ; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvev_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v2i64_1:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 0>
   ; ilvev.d with two identical operands is equivalent to splati.d
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][0]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvev_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v16i8_2:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 16, i32 16, i32 18, i32 18, i32 20, i32 20, i32 22, i32 22, i32 24, i32 24, i32 26, i32 26, i32 28, i32 28, i32 30, i32 30>
   ; CHECK-DAG: ilvev.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvev_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v8i16_2:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
   ; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvev_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v4i32_2:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 4, i32 6, i32 6>
   ; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvev_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvev_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvev_v2i64_2:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 2, i32 2>
   ; ilvev.d with two identical operands is equivalent to splati.d
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][0]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvod_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v16i8_0:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
   ; CHECK-DAG: ilvod.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvod_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v8i16_0:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
   ; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvod_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v4i32_0:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
   ; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvod_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v2i64_0:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
   ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvod_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v16i8_1:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
   ; CHECK-DAG: ilvod.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvod_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v8i16_1:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
   ; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvod_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v4i32_1:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
   ; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvod_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v2i64_1:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 1>
   ; ilvod.d with two identical operands is equivalent to splati.d
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvod_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v16i8_2:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 17, i32 17, i32 19, i32 19, i32 21, i32 21, i32 23, i32 23, i32 25, i32 25, i32 27, i32 27, i32 29, i32 29, i32 31, i32 31>
   ; CHECK-DAG: ilvod.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvod_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v8i16_2:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
   ; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvod_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v4i32_2:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 5, i32 5, i32 7, i32 7>
   ; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvod_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvod_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvod_v2i64_2:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 3>
   ; ilvod.d with two identical operands is equivalent to splati.d
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][1]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvr_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v16i8_0:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
   ; CHECK-DAG: ilvr.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvr_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v8i16_0:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
   ; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvr_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v4i32_0:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
   ; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvr_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v2i64_0:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
   ; ilvr.d and ilvev.d are equivalent for v2i64
   ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvr_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v16i8_1:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 16, i32 16, i32 17, i32 17, i32 18, i32 18, i32 19, i32 19, i32 20, i32 20, i32 21, i32 21, i32 22, i32 22, i32 23, i32 23>
   ; CHECK-DAG: ilvr.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvr_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v8i16_1:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11>
   ; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvr_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v4i32_1:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 4, i32 5, i32 5>
   ; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvr_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v2i64_1:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 2, i32 2>
   ; ilvr.d and splati.d are equivalent for v2i64
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][0]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvr_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v16i8_2:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
   ; CHECK-DAG: ilvr.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvr_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v8i16_2:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3>
   ; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvr_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v4i32_2:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
   ; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvr_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvr_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvr_v2i64_2:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 0>
   ; ilvr.d and splati.d are equivalent for v2i64
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][0]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvl_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v16i8_0:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
   ; CHECK-DAG: ilvl.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvl_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v8i16_0:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
   ; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvl_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v4i32_0:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
   ; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvl_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v2i64_0:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
   ; ilvl.d and ilvod.d are equivalent for v2i64
   ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvl_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v16i8_1:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 24, i32 24, i32 25, i32 25, i32 26, i32 26, i32 27, i32 27, i32 28, i32 28, i32 29, i32 29, i32 30, i32 30, i32 31, i32 31>
   ; CHECK-DAG: ilvl.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvl_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v8i16_1:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
   ; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvl_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v4i32_1:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 6, i32 6, i32 7, i32 7>
   ; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvl_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v2i64_1:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 3>
   ; ilvl.d and splati.d are equivalent for v2i64
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][1]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @ilvl_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v16i8_2:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
   ; CHECK-DAG: ilvl.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @ilvl_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v8i16_2:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
   ; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @ilvl_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v4i32_2:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
   ; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @ilvl_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @ilvl_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: ilvl_v2i64_2:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 1>
   ; ilvl.d and splati.d are equivalent for v2i64
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckev_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v16i8_0:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   ; CHECK-DAG: pckev.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckev_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v8i16_0:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   ; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckev_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v4i32_0:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   ; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckev_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v2i64_0:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 2>
   ; pckev.d and ilvev.d are equivalent for v2i64
   ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckev_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v16i8_1:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
   ; CHECK-DAG: pckev.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckev_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v8i16_1:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 8, i32 10, i32 12, i32 14, i32 8, i32 10, i32 12, i32 14>
   ; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckev_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v4i32_1:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 6, i32 4, i32 6>
   ; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckev_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v2i64_1:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 2, i32 2>
   ; pckev.d and splati.d are equivalent for v2i64
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][0]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckev_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v16i8_2:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
   ; CHECK-DAG: pckev.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckev_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v8i16_2:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 0, i32 2, i32 4, i32 6>
   ; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckev_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v4i32_2:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 2, i32 0, i32 2>
   ; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckev_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckev_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckev_v2i64_2:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 0, i32 0>
   ; pckev.d and splati.d are equivalent for v2i64
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][0]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckod_v16i8_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v16i8_0:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   ; CHECK-DAG: pckod.b [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckod_v8i16_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v8i16_0:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   ; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckod_v4i32_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v4i32_0:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
   ; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R2]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckod_v2i64_0(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v2i64_0:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 3>
   ; pckod.d and ilvod.d are equivalent for v2i64
   ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckod_v16i8_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v16i8_1:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %1 = load <16 x i8>, ptr %a
+  %2 = load <16 x i8>, ptr %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
   ; CHECK-DAG: pckod.b [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckod_v8i16_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v8i16_1:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %1 = load <8 x i16>, ptr %a
+  %2 = load <8 x i16>, ptr %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 9, i32 11, i32 13, i32 15, i32 9, i32 11, i32 13, i32 15>
   ; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckod_v4i32_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v4i32_1:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %1 = load <4 x i32>, ptr %a
+  %2 = load <4 x i32>, ptr %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 5, i32 7, i32 5, i32 7>
   ; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R2]], [[R2]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckod_v2i64_1(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v2i64_1:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %1 = load <2 x i64>, ptr %a
+  %2 = load <2 x i64>, ptr %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 3, i32 3>
   ; pckod.d and splati.d are equivalent for v2i64
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R2]][1]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
+define void @pckod_v16i8_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v16i8_2:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>, <16 x i8>* %b
+  %2 = load <16 x i8>, ptr %b
   %3 = shufflevector <16 x i8> %1, <16 x i8> %2,
                      <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
   ; CHECK-DAG: pckod.b [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <16 x i8> %3, <16 x i8>* %c
+  store <16 x i8> %3, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
+define void @pckod_v8i16_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v8i16_2:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>, <8 x i16>* %b
+  %2 = load <8 x i16>, ptr %b
   %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 1, i32 3, i32 5, i32 7>
   ; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <8 x i16> %3, <8 x i16>* %c
+  store <8 x i16> %3, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
+define void @pckod_v4i32_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v4i32_2:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>, <4 x i32>* %b
+  %2 = load <4 x i32>, ptr %b
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 1, i32 3, i32 1, i32 3>
   ; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
-  store <4 x i32> %3, <4 x i32>* %c
+  store <4 x i32> %3, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @pckod_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
+define void @pckod_v2i64_2(ptr %c, ptr %a, ptr %b) nounwind {
   ; CHECK-LABEL: pckod_v2i64_2:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>, <2 x i64>* %b
+  %2 = load <2 x i64>, ptr %b
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> <i32 1, i32 1>
   ; pckod.d and splati.d are equivalent for v2i64
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
-  store <2 x i64> %3, <2 x i64>* %c
+  store <2 x i64> %3, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void
 }
 
-define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind {
+define void @splati_v16i8_0(ptr %c, ptr %a) nounwind {
   ; CHECK-LABEL: splati_v16i8_0:
 
-  %1 = load <16 x i8>, <16 x i8>* %a
+  %1 = load <16 x i8>, ptr %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <16 x i8> %1, <16 x i8> undef,
                      <16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
   ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][4]
-  store <16 x i8> %2, <16 x i8>* %c
+  store <16 x i8> %2, ptr %c
   ; CHECK-DAG: st.b [[R3]], 0($4)
 
   ret void
 }
 
-define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind {
+define void @splati_v8i16_0(ptr %c, ptr %a) nounwind {
   ; CHECK-LABEL: splati_v8i16_0:
 
-  %1 = load <8 x i16>, <8 x i16>* %a
+  %1 = load <8 x i16>, ptr %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
   ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][4]
-  store <8 x i16> %2, <8 x i16>* %c
+  store <8 x i16> %2, ptr %c
   ; CHECK-DAG: st.h [[R3]], 0($4)
 
   ret void
 }
 
-define void @splati_v4i32_0(<4 x i32>* %c, <4 x i32>* %a) nounwind {
+define void @splati_v4i32_0(ptr %c, ptr %a) nounwind {
   ; CHECK-LABEL: splati_v4i32_0:
 
-  %1 = load <4 x i32>, <4 x i32>* %a
+  %1 = load <4 x i32>, ptr %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
   ; CHECK-DAG: splati.w [[R3:\$w[0-9]+]], [[R1]][3]
-  store <4 x i32> %2, <4 x i32>* %c
+  store <4 x i32> %2, ptr %c
   ; CHECK-DAG: st.w [[R3]], 0($4)
 
   ret void
 }
 
-define void @splati_v2i64_0(<2 x i64>* %c, <2 x i64>* %a) nounwind {
+define void @splati_v2i64_0(ptr %c, ptr %a) nounwind {
   ; CHECK-LABEL: splati_v2i64_0:
 
-  %1 = load <2 x i64>, <2 x i64>* %a
+  %1 = load <2 x i64>, ptr %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
   ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1]
-  store <2 x i64> %2, <2 x i64>* %c
+  store <2 x i64> %2, ptr %c
   ; CHECK-DAG: st.d [[R3]], 0($4)
 
   ret void

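
; ---------------------------------------------------------------------------
; Illustrative sketch, not part of this commit: the mechanical pattern the
; conversion applies throughout these tests. With opaque pointers the pointee
; type is dropped from the pointer operand and carried only by the load,
; store, and getelementptr instructions themselves, so "<16 x i8>*" becomes a
; plain "ptr". The function and value names below are hypothetical.
; ---------------------------------------------------------------------------
define void @opaque_ptr_sketch(ptr %c, ptr %a) {
  %p1 = getelementptr <16 x i8>, ptr %a, i32 1 ; was: getelementptr <16 x i8>, <16 x i8>* %a, i32 1
  %v = load <16 x i8>, ptr %p1                 ; was: load <16 x i8>, <16 x i8>* %p1
  store <16 x i8> %v, ptr %c                   ; was: store <16 x i8> %v, <16 x i8>* %c
  ret void
}
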
diff  --git a/llvm/test/CodeGen/Mips/msa/spill.ll b/llvm/test/CodeGen/Mips/msa/spill.ll
index d5502b5fa75f6..29f02920b10c6 100644
--- a/llvm/test/CodeGen/Mips/msa/spill.ll
+++ b/llvm/test/CodeGen/Mips/msa/spill.ll
@@ -4,75 +4,75 @@
 ; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 ; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r2 < %s | FileCheck %s
 
-define i32 @test_i8(<16 x i8>* %p0, <16 x i8>* %q1) nounwind {
+define i32 @test_i8(ptr %p0, ptr %q1) nounwind {
 entry:
-  %p1  = getelementptr <16 x i8>, <16 x i8>* %p0, i32 1
-  %p2  = getelementptr <16 x i8>, <16 x i8>* %p0, i32 2
-  %p3  = getelementptr <16 x i8>, <16 x i8>* %p0, i32 3
-  %p4  = getelementptr <16 x i8>, <16 x i8>* %p0, i32 4
-  %p5  = getelementptr <16 x i8>, <16 x i8>* %p0, i32 5
-  %p6  = getelementptr <16 x i8>, <16 x i8>* %p0, i32 6
-  %p7  = getelementptr <16 x i8>, <16 x i8>* %p0, i32 7
-  %p8  = getelementptr <16 x i8>, <16 x i8>* %p0, i32 8
-  %p9  = getelementptr <16 x i8>, <16 x i8>* %p0, i32 9
-  %p10 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 10
-  %p11 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 11
-  %p12 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 12
-  %p13 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 13
-  %p14 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 14
-  %p15 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 15
-  %p16 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 16
-  %p17 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 17
-  %p18 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 18
-  %p19 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 19
-  %p20 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 20
-  %p21 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 21
-  %p22 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 22
-  %p23 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 23
-  %p24 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 24
-  %p25 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 25
-  %p26 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 26
-  %p27 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 27
-  %p28 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 28
-  %p29 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 29
-  %p30 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 30
-  %p31 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 31
-  %p32 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 32
-  %p33 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 33
-  %0  = load <16 x i8>, <16 x i8>* %p0, align 16
-  %1  = load <16 x i8>, <16 x i8>* %p1, align 16
-  %2  = load <16 x i8>, <16 x i8>* %p2, align 16
-  %3  = load <16 x i8>, <16 x i8>* %p3, align 16
-  %4  = load <16 x i8>, <16 x i8>* %p4, align 16
-  %5  = load <16 x i8>, <16 x i8>* %p5, align 16
-  %6  = load <16 x i8>, <16 x i8>* %p6, align 16
-  %7  = load <16 x i8>, <16 x i8>* %p7, align 16
-  %8  = load <16 x i8>, <16 x i8>* %p8, align 16
-  %9  = load <16 x i8>, <16 x i8>* %p9, align 16
-  %10 = load <16 x i8>, <16 x i8>* %p10, align 16
-  %11 = load <16 x i8>, <16 x i8>* %p11, align 16
-  %12 = load <16 x i8>, <16 x i8>* %p12, align 16
-  %13 = load <16 x i8>, <16 x i8>* %p13, align 16
-  %14 = load <16 x i8>, <16 x i8>* %p14, align 16
-  %15 = load <16 x i8>, <16 x i8>* %p15, align 16
-  %16 = load <16 x i8>, <16 x i8>* %p16, align 16
-  %17 = load <16 x i8>, <16 x i8>* %p17, align 16
-  %18 = load <16 x i8>, <16 x i8>* %p18, align 16
-  %19 = load <16 x i8>, <16 x i8>* %p19, align 16
-  %20 = load <16 x i8>, <16 x i8>* %p20, align 16
-  %21 = load <16 x i8>, <16 x i8>* %p21, align 16
-  %22 = load <16 x i8>, <16 x i8>* %p22, align 16
-  %23 = load <16 x i8>, <16 x i8>* %p23, align 16
-  %24 = load <16 x i8>, <16 x i8>* %p24, align 16
-  %25 = load <16 x i8>, <16 x i8>* %p25, align 16
-  %26 = load <16 x i8>, <16 x i8>* %p26, align 16
-  %27 = load <16 x i8>, <16 x i8>* %p27, align 16
-  %28 = load <16 x i8>, <16 x i8>* %p28, align 16
-  %29 = load <16 x i8>, <16 x i8>* %p29, align 16
-  %30 = load <16 x i8>, <16 x i8>* %p30, align 16
-  %31 = load <16 x i8>, <16 x i8>* %p31, align 16
-  %32 = load <16 x i8>, <16 x i8>* %p32, align 16
-  %33 = load <16 x i8>, <16 x i8>* %p33, align 16
+  %p1  = getelementptr <16 x i8>, ptr %p0, i32 1
+  %p2  = getelementptr <16 x i8>, ptr %p0, i32 2
+  %p3  = getelementptr <16 x i8>, ptr %p0, i32 3
+  %p4  = getelementptr <16 x i8>, ptr %p0, i32 4
+  %p5  = getelementptr <16 x i8>, ptr %p0, i32 5
+  %p6  = getelementptr <16 x i8>, ptr %p0, i32 6
+  %p7  = getelementptr <16 x i8>, ptr %p0, i32 7
+  %p8  = getelementptr <16 x i8>, ptr %p0, i32 8
+  %p9  = getelementptr <16 x i8>, ptr %p0, i32 9
+  %p10 = getelementptr <16 x i8>, ptr %p0, i32 10
+  %p11 = getelementptr <16 x i8>, ptr %p0, i32 11
+  %p12 = getelementptr <16 x i8>, ptr %p0, i32 12
+  %p13 = getelementptr <16 x i8>, ptr %p0, i32 13
+  %p14 = getelementptr <16 x i8>, ptr %p0, i32 14
+  %p15 = getelementptr <16 x i8>, ptr %p0, i32 15
+  %p16 = getelementptr <16 x i8>, ptr %p0, i32 16
+  %p17 = getelementptr <16 x i8>, ptr %p0, i32 17
+  %p18 = getelementptr <16 x i8>, ptr %p0, i32 18
+  %p19 = getelementptr <16 x i8>, ptr %p0, i32 19
+  %p20 = getelementptr <16 x i8>, ptr %p0, i32 20
+  %p21 = getelementptr <16 x i8>, ptr %p0, i32 21
+  %p22 = getelementptr <16 x i8>, ptr %p0, i32 22
+  %p23 = getelementptr <16 x i8>, ptr %p0, i32 23
+  %p24 = getelementptr <16 x i8>, ptr %p0, i32 24
+  %p25 = getelementptr <16 x i8>, ptr %p0, i32 25
+  %p26 = getelementptr <16 x i8>, ptr %p0, i32 26
+  %p27 = getelementptr <16 x i8>, ptr %p0, i32 27
+  %p28 = getelementptr <16 x i8>, ptr %p0, i32 28
+  %p29 = getelementptr <16 x i8>, ptr %p0, i32 29
+  %p30 = getelementptr <16 x i8>, ptr %p0, i32 30
+  %p31 = getelementptr <16 x i8>, ptr %p0, i32 31
+  %p32 = getelementptr <16 x i8>, ptr %p0, i32 32
+  %p33 = getelementptr <16 x i8>, ptr %p0, i32 33
+  %0  = load <16 x i8>, ptr %p0, align 16
+  %1  = load <16 x i8>, ptr %p1, align 16
+  %2  = load <16 x i8>, ptr %p2, align 16
+  %3  = load <16 x i8>, ptr %p3, align 16
+  %4  = load <16 x i8>, ptr %p4, align 16
+  %5  = load <16 x i8>, ptr %p5, align 16
+  %6  = load <16 x i8>, ptr %p6, align 16
+  %7  = load <16 x i8>, ptr %p7, align 16
+  %8  = load <16 x i8>, ptr %p8, align 16
+  %9  = load <16 x i8>, ptr %p9, align 16
+  %10 = load <16 x i8>, ptr %p10, align 16
+  %11 = load <16 x i8>, ptr %p11, align 16
+  %12 = load <16 x i8>, ptr %p12, align 16
+  %13 = load <16 x i8>, ptr %p13, align 16
+  %14 = load <16 x i8>, ptr %p14, align 16
+  %15 = load <16 x i8>, ptr %p15, align 16
+  %16 = load <16 x i8>, ptr %p16, align 16
+  %17 = load <16 x i8>, ptr %p17, align 16
+  %18 = load <16 x i8>, ptr %p18, align 16
+  %19 = load <16 x i8>, ptr %p19, align 16
+  %20 = load <16 x i8>, ptr %p20, align 16
+  %21 = load <16 x i8>, ptr %p21, align 16
+  %22 = load <16 x i8>, ptr %p22, align 16
+  %23 = load <16 x i8>, ptr %p23, align 16
+  %24 = load <16 x i8>, ptr %p24, align 16
+  %25 = load <16 x i8>, ptr %p25, align 16
+  %26 = load <16 x i8>, ptr %p26, align 16
+  %27 = load <16 x i8>, ptr %p27, align 16
+  %28 = load <16 x i8>, ptr %p28, align 16
+  %29 = load <16 x i8>, ptr %p29, align 16
+  %30 = load <16 x i8>, ptr %p30, align 16
+  %31 = load <16 x i8>, ptr %p31, align 16
+  %32 = load <16 x i8>, ptr %p32, align 16
+  %33 = load <16 x i8>, ptr %p33, align 16
   %r1  = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0,   <16 x i8> %1)
   %r2  = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r1,  <16 x i8> %2)
   %r3  = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r2,  <16 x i8> %3)
@@ -153,75 +153,75 @@ declare i32       @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind
 ; CHECK: ld.b {{.*}} Reload
 ; CHECK: .size
 
-define i32 @test_i16(<8 x i16>* %p0, <8 x i16>* %q1) nounwind {
+define i32 @test_i16(ptr %p0, ptr %q1) nounwind {
 entry:
-  %p1  = getelementptr <8 x i16>, <8 x i16>* %p0, i32 1
-  %p2  = getelementptr <8 x i16>, <8 x i16>* %p0, i32 2
-  %p3  = getelementptr <8 x i16>, <8 x i16>* %p0, i32 3
-  %p4  = getelementptr <8 x i16>, <8 x i16>* %p0, i32 4
-  %p5  = getelementptr <8 x i16>, <8 x i16>* %p0, i32 5
-  %p6  = getelementptr <8 x i16>, <8 x i16>* %p0, i32 6
-  %p7  = getelementptr <8 x i16>, <8 x i16>* %p0, i32 7
-  %p8  = getelementptr <8 x i16>, <8 x i16>* %p0, i32 8
-  %p9  = getelementptr <8 x i16>, <8 x i16>* %p0, i32 9
-  %p10 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 10
-  %p11 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 11
-  %p12 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 12
-  %p13 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 13
-  %p14 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 14
-  %p15 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 15
-  %p16 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 16
-  %p17 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 17
-  %p18 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 18
-  %p19 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 19
-  %p20 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 20
-  %p21 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 21
-  %p22 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 22
-  %p23 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 23
-  %p24 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 24
-  %p25 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 25
-  %p26 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 26
-  %p27 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 27
-  %p28 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 28
-  %p29 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 29
-  %p30 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 30
-  %p31 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 31
-  %p32 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 32
-  %p33 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 33
-  %0  = load <8 x i16>, <8 x i16>* %p0, align 16
-  %1  = load <8 x i16>, <8 x i16>* %p1, align 16
-  %2  = load <8 x i16>, <8 x i16>* %p2, align 16
-  %3  = load <8 x i16>, <8 x i16>* %p3, align 16
-  %4  = load <8 x i16>, <8 x i16>* %p4, align 16
-  %5  = load <8 x i16>, <8 x i16>* %p5, align 16
-  %6  = load <8 x i16>, <8 x i16>* %p6, align 16
-  %7  = load <8 x i16>, <8 x i16>* %p7, align 16
-  %8  = load <8 x i16>, <8 x i16>* %p8, align 16
-  %9  = load <8 x i16>, <8 x i16>* %p9, align 16
-  %10 = load <8 x i16>, <8 x i16>* %p10, align 16
-  %11 = load <8 x i16>, <8 x i16>* %p11, align 16
-  %12 = load <8 x i16>, <8 x i16>* %p12, align 16
-  %13 = load <8 x i16>, <8 x i16>* %p13, align 16
-  %14 = load <8 x i16>, <8 x i16>* %p14, align 16
-  %15 = load <8 x i16>, <8 x i16>* %p15, align 16
-  %16 = load <8 x i16>, <8 x i16>* %p16, align 16
-  %17 = load <8 x i16>, <8 x i16>* %p17, align 16
-  %18 = load <8 x i16>, <8 x i16>* %p18, align 16
-  %19 = load <8 x i16>, <8 x i16>* %p19, align 16
-  %20 = load <8 x i16>, <8 x i16>* %p20, align 16
-  %21 = load <8 x i16>, <8 x i16>* %p21, align 16
-  %22 = load <8 x i16>, <8 x i16>* %p22, align 16
-  %23 = load <8 x i16>, <8 x i16>* %p23, align 16
-  %24 = load <8 x i16>, <8 x i16>* %p24, align 16
-  %25 = load <8 x i16>, <8 x i16>* %p25, align 16
-  %26 = load <8 x i16>, <8 x i16>* %p26, align 16
-  %27 = load <8 x i16>, <8 x i16>* %p27, align 16
-  %28 = load <8 x i16>, <8 x i16>* %p28, align 16
-  %29 = load <8 x i16>, <8 x i16>* %p29, align 16
-  %30 = load <8 x i16>, <8 x i16>* %p30, align 16
-  %31 = load <8 x i16>, <8 x i16>* %p31, align 16
-  %32 = load <8 x i16>, <8 x i16>* %p32, align 16
-  %33 = load <8 x i16>, <8 x i16>* %p33, align 16
+  %p1  = getelementptr <8 x i16>, ptr %p0, i32 1
+  %p2  = getelementptr <8 x i16>, ptr %p0, i32 2
+  %p3  = getelementptr <8 x i16>, ptr %p0, i32 3
+  %p4  = getelementptr <8 x i16>, ptr %p0, i32 4
+  %p5  = getelementptr <8 x i16>, ptr %p0, i32 5
+  %p6  = getelementptr <8 x i16>, ptr %p0, i32 6
+  %p7  = getelementptr <8 x i16>, ptr %p0, i32 7
+  %p8  = getelementptr <8 x i16>, ptr %p0, i32 8
+  %p9  = getelementptr <8 x i16>, ptr %p0, i32 9
+  %p10 = getelementptr <8 x i16>, ptr %p0, i32 10
+  %p11 = getelementptr <8 x i16>, ptr %p0, i32 11
+  %p12 = getelementptr <8 x i16>, ptr %p0, i32 12
+  %p13 = getelementptr <8 x i16>, ptr %p0, i32 13
+  %p14 = getelementptr <8 x i16>, ptr %p0, i32 14
+  %p15 = getelementptr <8 x i16>, ptr %p0, i32 15
+  %p16 = getelementptr <8 x i16>, ptr %p0, i32 16
+  %p17 = getelementptr <8 x i16>, ptr %p0, i32 17
+  %p18 = getelementptr <8 x i16>, ptr %p0, i32 18
+  %p19 = getelementptr <8 x i16>, ptr %p0, i32 19
+  %p20 = getelementptr <8 x i16>, ptr %p0, i32 20
+  %p21 = getelementptr <8 x i16>, ptr %p0, i32 21
+  %p22 = getelementptr <8 x i16>, ptr %p0, i32 22
+  %p23 = getelementptr <8 x i16>, ptr %p0, i32 23
+  %p24 = getelementptr <8 x i16>, ptr %p0, i32 24
+  %p25 = getelementptr <8 x i16>, ptr %p0, i32 25
+  %p26 = getelementptr <8 x i16>, ptr %p0, i32 26
+  %p27 = getelementptr <8 x i16>, ptr %p0, i32 27
+  %p28 = getelementptr <8 x i16>, ptr %p0, i32 28
+  %p29 = getelementptr <8 x i16>, ptr %p0, i32 29
+  %p30 = getelementptr <8 x i16>, ptr %p0, i32 30
+  %p31 = getelementptr <8 x i16>, ptr %p0, i32 31
+  %p32 = getelementptr <8 x i16>, ptr %p0, i32 32
+  %p33 = getelementptr <8 x i16>, ptr %p0, i32 33
+  %0  = load <8 x i16>, ptr %p0, align 16
+  %1  = load <8 x i16>, ptr %p1, align 16
+  %2  = load <8 x i16>, ptr %p2, align 16
+  %3  = load <8 x i16>, ptr %p3, align 16
+  %4  = load <8 x i16>, ptr %p4, align 16
+  %5  = load <8 x i16>, ptr %p5, align 16
+  %6  = load <8 x i16>, ptr %p6, align 16
+  %7  = load <8 x i16>, ptr %p7, align 16
+  %8  = load <8 x i16>, ptr %p8, align 16
+  %9  = load <8 x i16>, ptr %p9, align 16
+  %10 = load <8 x i16>, ptr %p10, align 16
+  %11 = load <8 x i16>, ptr %p11, align 16
+  %12 = load <8 x i16>, ptr %p12, align 16
+  %13 = load <8 x i16>, ptr %p13, align 16
+  %14 = load <8 x i16>, ptr %p14, align 16
+  %15 = load <8 x i16>, ptr %p15, align 16
+  %16 = load <8 x i16>, ptr %p16, align 16
+  %17 = load <8 x i16>, ptr %p17, align 16
+  %18 = load <8 x i16>, ptr %p18, align 16
+  %19 = load <8 x i16>, ptr %p19, align 16
+  %20 = load <8 x i16>, ptr %p20, align 16
+  %21 = load <8 x i16>, ptr %p21, align 16
+  %22 = load <8 x i16>, ptr %p22, align 16
+  %23 = load <8 x i16>, ptr %p23, align 16
+  %24 = load <8 x i16>, ptr %p24, align 16
+  %25 = load <8 x i16>, ptr %p25, align 16
+  %26 = load <8 x i16>, ptr %p26, align 16
+  %27 = load <8 x i16>, ptr %p27, align 16
+  %28 = load <8 x i16>, ptr %p28, align 16
+  %29 = load <8 x i16>, ptr %p29, align 16
+  %30 = load <8 x i16>, ptr %p30, align 16
+  %31 = load <8 x i16>, ptr %p31, align 16
+  %32 = load <8 x i16>, ptr %p32, align 16
+  %33 = load <8 x i16>, ptr %p33, align 16
   %r1  = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0,   <8 x i16> %1)
   %r2  = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r1,  <8 x i16> %2)
   %r3  = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r2,  <8 x i16> %3)
@@ -302,75 +302,75 @@ declare i32       @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind
 ; CHECK: ld.h {{.*}} Reload
 ; CHECK: .size
 
-define i32 @test_i32(<4 x i32>* %p0, <4 x i32>* %q1) nounwind {
+define i32 @test_i32(ptr %p0, ptr %q1) nounwind {
 entry:
-  %p1  = getelementptr <4 x i32>, <4 x i32>* %p0, i32 1
-  %p2  = getelementptr <4 x i32>, <4 x i32>* %p0, i32 2
-  %p3  = getelementptr <4 x i32>, <4 x i32>* %p0, i32 3
-  %p4  = getelementptr <4 x i32>, <4 x i32>* %p0, i32 4
-  %p5  = getelementptr <4 x i32>, <4 x i32>* %p0, i32 5
-  %p6  = getelementptr <4 x i32>, <4 x i32>* %p0, i32 6
-  %p7  = getelementptr <4 x i32>, <4 x i32>* %p0, i32 7
-  %p8  = getelementptr <4 x i32>, <4 x i32>* %p0, i32 8
-  %p9  = getelementptr <4 x i32>, <4 x i32>* %p0, i32 9
-  %p10 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 10
-  %p11 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 11
-  %p12 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 12
-  %p13 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 13
-  %p14 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 14
-  %p15 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 15
-  %p16 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 16
-  %p17 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 17
-  %p18 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 18
-  %p19 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 19
-  %p20 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 20
-  %p21 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 21
-  %p22 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 22
-  %p23 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 23
-  %p24 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 24
-  %p25 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 25
-  %p26 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 26
-  %p27 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 27
-  %p28 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 28
-  %p29 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 29
-  %p30 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 30
-  %p31 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 31
-  %p32 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 32
-  %p33 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 33
-  %0  = load <4 x i32>, <4 x i32>* %p0, align 16
-  %1  = load <4 x i32>, <4 x i32>* %p1, align 16
-  %2  = load <4 x i32>, <4 x i32>* %p2, align 16
-  %3  = load <4 x i32>, <4 x i32>* %p3, align 16
-  %4  = load <4 x i32>, <4 x i32>* %p4, align 16
-  %5  = load <4 x i32>, <4 x i32>* %p5, align 16
-  %6  = load <4 x i32>, <4 x i32>* %p6, align 16
-  %7  = load <4 x i32>, <4 x i32>* %p7, align 16
-  %8  = load <4 x i32>, <4 x i32>* %p8, align 16
-  %9  = load <4 x i32>, <4 x i32>* %p9, align 16
-  %10 = load <4 x i32>, <4 x i32>* %p10, align 16
-  %11 = load <4 x i32>, <4 x i32>* %p11, align 16
-  %12 = load <4 x i32>, <4 x i32>* %p12, align 16
-  %13 = load <4 x i32>, <4 x i32>* %p13, align 16
-  %14 = load <4 x i32>, <4 x i32>* %p14, align 16
-  %15 = load <4 x i32>, <4 x i32>* %p15, align 16
-  %16 = load <4 x i32>, <4 x i32>* %p16, align 16
-  %17 = load <4 x i32>, <4 x i32>* %p17, align 16
-  %18 = load <4 x i32>, <4 x i32>* %p18, align 16
-  %19 = load <4 x i32>, <4 x i32>* %p19, align 16
-  %20 = load <4 x i32>, <4 x i32>* %p20, align 16
-  %21 = load <4 x i32>, <4 x i32>* %p21, align 16
-  %22 = load <4 x i32>, <4 x i32>* %p22, align 16
-  %23 = load <4 x i32>, <4 x i32>* %p23, align 16
-  %24 = load <4 x i32>, <4 x i32>* %p24, align 16
-  %25 = load <4 x i32>, <4 x i32>* %p25, align 16
-  %26 = load <4 x i32>, <4 x i32>* %p26, align 16
-  %27 = load <4 x i32>, <4 x i32>* %p27, align 16
-  %28 = load <4 x i32>, <4 x i32>* %p28, align 16
-  %29 = load <4 x i32>, <4 x i32>* %p29, align 16
-  %30 = load <4 x i32>, <4 x i32>* %p30, align 16
-  %31 = load <4 x i32>, <4 x i32>* %p31, align 16
-  %32 = load <4 x i32>, <4 x i32>* %p32, align 16
-  %33 = load <4 x i32>, <4 x i32>* %p33, align 16
+  %p1  = getelementptr <4 x i32>, ptr %p0, i32 1
+  %p2  = getelementptr <4 x i32>, ptr %p0, i32 2
+  %p3  = getelementptr <4 x i32>, ptr %p0, i32 3
+  %p4  = getelementptr <4 x i32>, ptr %p0, i32 4
+  %p5  = getelementptr <4 x i32>, ptr %p0, i32 5
+  %p6  = getelementptr <4 x i32>, ptr %p0, i32 6
+  %p7  = getelementptr <4 x i32>, ptr %p0, i32 7
+  %p8  = getelementptr <4 x i32>, ptr %p0, i32 8
+  %p9  = getelementptr <4 x i32>, ptr %p0, i32 9
+  %p10 = getelementptr <4 x i32>, ptr %p0, i32 10
+  %p11 = getelementptr <4 x i32>, ptr %p0, i32 11
+  %p12 = getelementptr <4 x i32>, ptr %p0, i32 12
+  %p13 = getelementptr <4 x i32>, ptr %p0, i32 13
+  %p14 = getelementptr <4 x i32>, ptr %p0, i32 14
+  %p15 = getelementptr <4 x i32>, ptr %p0, i32 15
+  %p16 = getelementptr <4 x i32>, ptr %p0, i32 16
+  %p17 = getelementptr <4 x i32>, ptr %p0, i32 17
+  %p18 = getelementptr <4 x i32>, ptr %p0, i32 18
+  %p19 = getelementptr <4 x i32>, ptr %p0, i32 19
+  %p20 = getelementptr <4 x i32>, ptr %p0, i32 20
+  %p21 = getelementptr <4 x i32>, ptr %p0, i32 21
+  %p22 = getelementptr <4 x i32>, ptr %p0, i32 22
+  %p23 = getelementptr <4 x i32>, ptr %p0, i32 23
+  %p24 = getelementptr <4 x i32>, ptr %p0, i32 24
+  %p25 = getelementptr <4 x i32>, ptr %p0, i32 25
+  %p26 = getelementptr <4 x i32>, ptr %p0, i32 26
+  %p27 = getelementptr <4 x i32>, ptr %p0, i32 27
+  %p28 = getelementptr <4 x i32>, ptr %p0, i32 28
+  %p29 = getelementptr <4 x i32>, ptr %p0, i32 29
+  %p30 = getelementptr <4 x i32>, ptr %p0, i32 30
+  %p31 = getelementptr <4 x i32>, ptr %p0, i32 31
+  %p32 = getelementptr <4 x i32>, ptr %p0, i32 32
+  %p33 = getelementptr <4 x i32>, ptr %p0, i32 33
+  %0  = load <4 x i32>, ptr %p0, align 16
+  %1  = load <4 x i32>, ptr %p1, align 16
+  %2  = load <4 x i32>, ptr %p2, align 16
+  %3  = load <4 x i32>, ptr %p3, align 16
+  %4  = load <4 x i32>, ptr %p4, align 16
+  %5  = load <4 x i32>, ptr %p5, align 16
+  %6  = load <4 x i32>, ptr %p6, align 16
+  %7  = load <4 x i32>, ptr %p7, align 16
+  %8  = load <4 x i32>, ptr %p8, align 16
+  %9  = load <4 x i32>, ptr %p9, align 16
+  %10 = load <4 x i32>, ptr %p10, align 16
+  %11 = load <4 x i32>, ptr %p11, align 16
+  %12 = load <4 x i32>, ptr %p12, align 16
+  %13 = load <4 x i32>, ptr %p13, align 16
+  %14 = load <4 x i32>, ptr %p14, align 16
+  %15 = load <4 x i32>, ptr %p15, align 16
+  %16 = load <4 x i32>, ptr %p16, align 16
+  %17 = load <4 x i32>, ptr %p17, align 16
+  %18 = load <4 x i32>, ptr %p18, align 16
+  %19 = load <4 x i32>, ptr %p19, align 16
+  %20 = load <4 x i32>, ptr %p20, align 16
+  %21 = load <4 x i32>, ptr %p21, align 16
+  %22 = load <4 x i32>, ptr %p22, align 16
+  %23 = load <4 x i32>, ptr %p23, align 16
+  %24 = load <4 x i32>, ptr %p24, align 16
+  %25 = load <4 x i32>, ptr %p25, align 16
+  %26 = load <4 x i32>, ptr %p26, align 16
+  %27 = load <4 x i32>, ptr %p27, align 16
+  %28 = load <4 x i32>, ptr %p28, align 16
+  %29 = load <4 x i32>, ptr %p29, align 16
+  %30 = load <4 x i32>, ptr %p30, align 16
+  %31 = load <4 x i32>, ptr %p31, align 16
+  %32 = load <4 x i32>, ptr %p32, align 16
+  %33 = load <4 x i32>, ptr %p33, align 16
   %r1 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1)
   %r2 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r1, <4 x i32> %2)
   %r3 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r2, <4 x i32> %3)
@@ -451,75 +451,75 @@ declare i32       @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind
 ; CHECK: ld.w {{.*}} Reload
 ; CHECK: .size
 
-define i32 @test_i64(<2 x i64>* %p0, <2 x i64>* %q1) nounwind {
+define i32 @test_i64(ptr %p0, ptr %q1) nounwind {
 entry:
-  %p1  = getelementptr <2 x i64>, <2 x i64>* %p0, i32 1
-  %p2  = getelementptr <2 x i64>, <2 x i64>* %p0, i32 2
-  %p3  = getelementptr <2 x i64>, <2 x i64>* %p0, i32 3
-  %p4  = getelementptr <2 x i64>, <2 x i64>* %p0, i32 4
-  %p5  = getelementptr <2 x i64>, <2 x i64>* %p0, i32 5
-  %p6  = getelementptr <2 x i64>, <2 x i64>* %p0, i32 6
-  %p7  = getelementptr <2 x i64>, <2 x i64>* %p0, i32 7
-  %p8  = getelementptr <2 x i64>, <2 x i64>* %p0, i32 8
-  %p9  = getelementptr <2 x i64>, <2 x i64>* %p0, i32 9
-  %p10 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 10
-  %p11 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 11
-  %p12 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 12
-  %p13 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 13
-  %p14 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 14
-  %p15 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 15
-  %p16 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 16
-  %p17 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 17
-  %p18 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 18
-  %p19 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 19
-  %p20 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 20
-  %p21 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 21
-  %p22 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 22
-  %p23 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 23
-  %p24 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 24
-  %p25 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 25
-  %p26 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 26
-  %p27 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 27
-  %p28 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 28
-  %p29 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 29
-  %p30 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 30
-  %p31 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 31
-  %p32 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 32
-  %p33 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 33
-  %0  = load <2 x i64>, <2 x i64>* %p0, align 16
-  %1  = load <2 x i64>, <2 x i64>* %p1, align 16
-  %2  = load <2 x i64>, <2 x i64>* %p2, align 16
-  %3  = load <2 x i64>, <2 x i64>* %p3, align 16
-  %4  = load <2 x i64>, <2 x i64>* %p4, align 16
-  %5  = load <2 x i64>, <2 x i64>* %p5, align 16
-  %6  = load <2 x i64>, <2 x i64>* %p6, align 16
-  %7  = load <2 x i64>, <2 x i64>* %p7, align 16
-  %8  = load <2 x i64>, <2 x i64>* %p8, align 16
-  %9  = load <2 x i64>, <2 x i64>* %p9, align 16
-  %10 = load <2 x i64>, <2 x i64>* %p10, align 16
-  %11 = load <2 x i64>, <2 x i64>* %p11, align 16
-  %12 = load <2 x i64>, <2 x i64>* %p12, align 16
-  %13 = load <2 x i64>, <2 x i64>* %p13, align 16
-  %14 = load <2 x i64>, <2 x i64>* %p14, align 16
-  %15 = load <2 x i64>, <2 x i64>* %p15, align 16
-  %16 = load <2 x i64>, <2 x i64>* %p16, align 16
-  %17 = load <2 x i64>, <2 x i64>* %p17, align 16
-  %18 = load <2 x i64>, <2 x i64>* %p18, align 16
-  %19 = load <2 x i64>, <2 x i64>* %p19, align 16
-  %20 = load <2 x i64>, <2 x i64>* %p20, align 16
-  %21 = load <2 x i64>, <2 x i64>* %p21, align 16
-  %22 = load <2 x i64>, <2 x i64>* %p22, align 16
-  %23 = load <2 x i64>, <2 x i64>* %p23, align 16
-  %24 = load <2 x i64>, <2 x i64>* %p24, align 16
-  %25 = load <2 x i64>, <2 x i64>* %p25, align 16
-  %26 = load <2 x i64>, <2 x i64>* %p26, align 16
-  %27 = load <2 x i64>, <2 x i64>* %p27, align 16
-  %28 = load <2 x i64>, <2 x i64>* %p28, align 16
-  %29 = load <2 x i64>, <2 x i64>* %p29, align 16
-  %30 = load <2 x i64>, <2 x i64>* %p30, align 16
-  %31 = load <2 x i64>, <2 x i64>* %p31, align 16
-  %32 = load <2 x i64>, <2 x i64>* %p32, align 16
-  %33 = load <2 x i64>, <2 x i64>* %p33, align 16
+  %p1  = getelementptr <2 x i64>, ptr %p0, i32 1
+  %p2  = getelementptr <2 x i64>, ptr %p0, i32 2
+  %p3  = getelementptr <2 x i64>, ptr %p0, i32 3
+  %p4  = getelementptr <2 x i64>, ptr %p0, i32 4
+  %p5  = getelementptr <2 x i64>, ptr %p0, i32 5
+  %p6  = getelementptr <2 x i64>, ptr %p0, i32 6
+  %p7  = getelementptr <2 x i64>, ptr %p0, i32 7
+  %p8  = getelementptr <2 x i64>, ptr %p0, i32 8
+  %p9  = getelementptr <2 x i64>, ptr %p0, i32 9
+  %p10 = getelementptr <2 x i64>, ptr %p0, i32 10
+  %p11 = getelementptr <2 x i64>, ptr %p0, i32 11
+  %p12 = getelementptr <2 x i64>, ptr %p0, i32 12
+  %p13 = getelementptr <2 x i64>, ptr %p0, i32 13
+  %p14 = getelementptr <2 x i64>, ptr %p0, i32 14
+  %p15 = getelementptr <2 x i64>, ptr %p0, i32 15
+  %p16 = getelementptr <2 x i64>, ptr %p0, i32 16
+  %p17 = getelementptr <2 x i64>, ptr %p0, i32 17
+  %p18 = getelementptr <2 x i64>, ptr %p0, i32 18
+  %p19 = getelementptr <2 x i64>, ptr %p0, i32 19
+  %p20 = getelementptr <2 x i64>, ptr %p0, i32 20
+  %p21 = getelementptr <2 x i64>, ptr %p0, i32 21
+  %p22 = getelementptr <2 x i64>, ptr %p0, i32 22
+  %p23 = getelementptr <2 x i64>, ptr %p0, i32 23
+  %p24 = getelementptr <2 x i64>, ptr %p0, i32 24
+  %p25 = getelementptr <2 x i64>, ptr %p0, i32 25
+  %p26 = getelementptr <2 x i64>, ptr %p0, i32 26
+  %p27 = getelementptr <2 x i64>, ptr %p0, i32 27
+  %p28 = getelementptr <2 x i64>, ptr %p0, i32 28
+  %p29 = getelementptr <2 x i64>, ptr %p0, i32 29
+  %p30 = getelementptr <2 x i64>, ptr %p0, i32 30
+  %p31 = getelementptr <2 x i64>, ptr %p0, i32 31
+  %p32 = getelementptr <2 x i64>, ptr %p0, i32 32
+  %p33 = getelementptr <2 x i64>, ptr %p0, i32 33
+  %0  = load <2 x i64>, ptr %p0, align 16
+  %1  = load <2 x i64>, ptr %p1, align 16
+  %2  = load <2 x i64>, ptr %p2, align 16
+  %3  = load <2 x i64>, ptr %p3, align 16
+  %4  = load <2 x i64>, ptr %p4, align 16
+  %5  = load <2 x i64>, ptr %p5, align 16
+  %6  = load <2 x i64>, ptr %p6, align 16
+  %7  = load <2 x i64>, ptr %p7, align 16
+  %8  = load <2 x i64>, ptr %p8, align 16
+  %9  = load <2 x i64>, ptr %p9, align 16
+  %10 = load <2 x i64>, ptr %p10, align 16
+  %11 = load <2 x i64>, ptr %p11, align 16
+  %12 = load <2 x i64>, ptr %p12, align 16
+  %13 = load <2 x i64>, ptr %p13, align 16
+  %14 = load <2 x i64>, ptr %p14, align 16
+  %15 = load <2 x i64>, ptr %p15, align 16
+  %16 = load <2 x i64>, ptr %p16, align 16
+  %17 = load <2 x i64>, ptr %p17, align 16
+  %18 = load <2 x i64>, ptr %p18, align 16
+  %19 = load <2 x i64>, ptr %p19, align 16
+  %20 = load <2 x i64>, ptr %p20, align 16
+  %21 = load <2 x i64>, ptr %p21, align 16
+  %22 = load <2 x i64>, ptr %p22, align 16
+  %23 = load <2 x i64>, ptr %p23, align 16
+  %24 = load <2 x i64>, ptr %p24, align 16
+  %25 = load <2 x i64>, ptr %p25, align 16
+  %26 = load <2 x i64>, ptr %p26, align 16
+  %27 = load <2 x i64>, ptr %p27, align 16
+  %28 = load <2 x i64>, ptr %p28, align 16
+  %29 = load <2 x i64>, ptr %p29, align 16
+  %30 = load <2 x i64>, ptr %p30, align 16
+  %31 = load <2 x i64>, ptr %p31, align 16
+  %32 = load <2 x i64>, ptr %p32, align 16
+  %33 = load <2 x i64>, ptr %p33, align 16
   %r1  = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0,   <2 x i64> %1)
   %r2  = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r1,  <2 x i64> %2)
   %r3  = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r2,  <2 x i64> %3)

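The rewrite is mechanical throughout: a value of type <16 x i8>* (or any T*)
becomes the typeless ptr, and the pointee type is instead carried by the
instruction that uses the pointer. A minimal sketch of the pattern, using a
stand-in global @g rather than a symbol from these tests:

  ; typed pointers (before)
  %v = load <16 x i8>, <16 x i8>* @g
  store <16 x i8> %v, <16 x i8>* @g

  ; opaque pointers (after)
  %v = load <16 x i8>, ptr @g
  store <16 x i8> %v, ptr @g

The loaded and stored value types are unchanged, so the generated code is
identical and the conversion is NFC.
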
diff  --git a/llvm/test/CodeGen/Mips/msa/vec.ll b/llvm/test/CodeGen/Mips/msa/vec.ll
index de57a0250e847..cc4eba6c95bf1 100644
--- a/llvm/test/CodeGen/Mips/msa/vec.ll
+++ b/llvm/test/CodeGen/Mips/msa/vec.ll
@@ -11,13 +11,13 @@
 
 define void @llvm_mips_and_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG2
   %2 = bitcast <16 x i8> %0 to <16 x i8>
   %3 = bitcast <16 x i8> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <16 x i8>
-  store <16 x i8> %5, <16 x i8>* @llvm_mips_and_v_b_RES
+  store <16 x i8> %5, ptr @llvm_mips_and_v_b_RES
   ret void
 }
 
@@ -34,13 +34,13 @@ entry:
 
 define void @llvm_mips_and_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG2
   %2 = bitcast <8 x i16> %0 to <16 x i8>
   %3 = bitcast <8 x i16> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <8 x i16>
-  store <8 x i16> %5, <8 x i16>* @llvm_mips_and_v_h_RES
+  store <8 x i16> %5, ptr @llvm_mips_and_v_h_RES
   ret void
 }
 
@@ -57,13 +57,13 @@ entry:
 
 define void @llvm_mips_and_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG2
   %2 = bitcast <4 x i32> %0 to <16 x i8>
   %3 = bitcast <4 x i32> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <4 x i32>
-  store <4 x i32> %5, <4 x i32>* @llvm_mips_and_v_w_RES
+  store <4 x i32> %5, ptr @llvm_mips_and_v_w_RES
   ret void
 }
 
@@ -80,13 +80,13 @@ entry:
 
 define void @llvm_mips_and_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG2
   %2 = bitcast <2 x i64> %0 to <16 x i8>
   %3 = bitcast <2 x i64> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <2 x i64>
-  store <2 x i64> %5, <2 x i64>* @llvm_mips_and_v_d_RES
+  store <2 x i64> %5, ptr @llvm_mips_and_v_d_RES
   ret void
 }
 
@@ -99,10 +99,10 @@ entry:
 ;
 define void @and_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_and_v_b_ARG2
   %2 = and <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_and_v_b_RES
   ret void
 }
 
@@ -115,10 +115,10 @@ entry:
 ;
 define void @and_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG2
   %2 = and <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_and_v_h_RES
   ret void
 }
 
@@ -132,10 +132,10 @@ entry:
 
 define void @and_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_and_v_w_ARG2
   %2 = and <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_and_v_w_RES
   ret void
 }
 
@@ -149,10 +149,10 @@ entry:
 
 define void @and_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_and_v_d_ARG2
   %2 = and <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_and_v_d_RES
   ret void
 }
 
@@ -170,15 +170,15 @@ entry:
 
 define void @llvm_mips_bmnz_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_bmnz_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bmnz_v_b_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_bmnz_v_b_ARG3
   %3 = bitcast <16 x i8> %0 to <16 x i8>
   %4 = bitcast <16 x i8> %1 to <16 x i8>
   %5 = bitcast <16 x i8> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <16 x i8>
-  store <16 x i8> %7, <16 x i8>* @llvm_mips_bmnz_v_b_RES
+  store <16 x i8> %7, ptr @llvm_mips_bmnz_v_b_RES
   ret void
 }
 
@@ -200,15 +200,15 @@ entry:
 
 define void @llvm_mips_bmnz_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_bmnz_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_bmnz_v_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_bmnz_v_h_ARG3
   %3 = bitcast <8 x i16> %0 to <16 x i8>
   %4 = bitcast <8 x i16> %1 to <16 x i8>
   %5 = bitcast <8 x i16> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <8 x i16>
-  store <8 x i16> %7, <8 x i16>* @llvm_mips_bmnz_v_h_RES
+  store <8 x i16> %7, ptr @llvm_mips_bmnz_v_h_RES
   ret void
 }
 
@@ -230,15 +230,15 @@ entry:
 
 define void @llvm_mips_bmnz_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_bmnz_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_bmnz_v_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_bmnz_v_w_ARG3
   %3 = bitcast <4 x i32> %0 to <16 x i8>
   %4 = bitcast <4 x i32> %1 to <16 x i8>
   %5 = bitcast <4 x i32> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <4 x i32>
-  store <4 x i32> %7, <4 x i32>* @llvm_mips_bmnz_v_w_RES
+  store <4 x i32> %7, ptr @llvm_mips_bmnz_v_w_RES
   ret void
 }
 
@@ -260,15 +260,15 @@ entry:
 
 define void @llvm_mips_bmnz_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG2
-  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_bmnz_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_bmnz_v_d_ARG2
+  %2 = load <2 x i64>, ptr @llvm_mips_bmnz_v_d_ARG3
   %3 = bitcast <2 x i64> %0 to <16 x i8>
   %4 = bitcast <2 x i64> %1 to <16 x i8>
   %5 = bitcast <2 x i64> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bmnz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <2 x i64>
-  store <2 x i64> %7, <2 x i64>* @llvm_mips_bmnz_v_d_RES
+  store <2 x i64> %7, ptr @llvm_mips_bmnz_v_d_RES
   ret void
 }
 
@@ -290,15 +290,15 @@ entry:
 
 define void @llvm_mips_bmz_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_bmz_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bmz_v_b_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_bmz_v_b_ARG3
   %3 = bitcast <16 x i8> %0 to <16 x i8>
   %4 = bitcast <16 x i8> %1 to <16 x i8>
   %5 = bitcast <16 x i8> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <16 x i8>
-  store <16 x i8> %7, <16 x i8>* @llvm_mips_bmz_v_b_RES
+  store <16 x i8> %7, ptr @llvm_mips_bmz_v_b_RES
   ret void
 }
 
@@ -321,15 +321,15 @@ entry:
 
 define void @llvm_mips_bmz_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_bmz_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_bmz_v_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_bmz_v_h_ARG3
   %3 = bitcast <8 x i16> %0 to <16 x i8>
   %4 = bitcast <8 x i16> %1 to <16 x i8>
   %5 = bitcast <8 x i16> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <8 x i16>
-  store <8 x i16> %7, <8 x i16>* @llvm_mips_bmz_v_h_RES
+  store <8 x i16> %7, ptr @llvm_mips_bmz_v_h_RES
   ret void
 }
 
@@ -352,15 +352,15 @@ entry:
 
 define void @llvm_mips_bmz_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_bmz_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_bmz_v_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_bmz_v_w_ARG3
   %3 = bitcast <4 x i32> %0 to <16 x i8>
   %4 = bitcast <4 x i32> %1 to <16 x i8>
   %5 = bitcast <4 x i32> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <4 x i32>
-  store <4 x i32> %7, <4 x i32>* @llvm_mips_bmz_v_w_RES
+  store <4 x i32> %7, ptr @llvm_mips_bmz_v_w_RES
   ret void
 }
 
@@ -383,15 +383,15 @@ entry:
 
 define void @llvm_mips_bmz_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG2
-  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_bmz_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_bmz_v_d_ARG2
+  %2 = load <2 x i64>, ptr @llvm_mips_bmz_v_d_ARG3
   %3 = bitcast <2 x i64> %0 to <16 x i8>
   %4 = bitcast <2 x i64> %1 to <16 x i8>
   %5 = bitcast <2 x i64> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bmz.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <2 x i64>
-  store <2 x i64> %7, <2 x i64>* @llvm_mips_bmz_v_d_RES
+  store <2 x i64> %7, ptr @llvm_mips_bmz_v_d_RES
   ret void
 }
 
@@ -414,15 +414,15 @@ entry:
 
 define void @llvm_mips_bsel_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG2
-  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG3
+  %0 = load <16 x i8>, ptr @llvm_mips_bsel_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_bsel_v_b_ARG2
+  %2 = load <16 x i8>, ptr @llvm_mips_bsel_v_b_ARG3
   %3 = bitcast <16 x i8> %0 to <16 x i8>
   %4 = bitcast <16 x i8> %1 to <16 x i8>
   %5 = bitcast <16 x i8> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <16 x i8>
-  store <16 x i8> %7, <16 x i8>* @llvm_mips_bsel_v_b_RES
+  store <16 x i8> %7, ptr @llvm_mips_bsel_v_b_RES
   ret void
 }
 
@@ -445,15 +445,15 @@ entry:
 
 define void @llvm_mips_bsel_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG2
-  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG3
+  %0 = load <8 x i16>, ptr @llvm_mips_bsel_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_bsel_v_h_ARG2
+  %2 = load <8 x i16>, ptr @llvm_mips_bsel_v_h_ARG3
   %3 = bitcast <8 x i16> %0 to <16 x i8>
   %4 = bitcast <8 x i16> %1 to <16 x i8>
   %5 = bitcast <8 x i16> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <8 x i16>
-  store <8 x i16> %7, <8 x i16>* @llvm_mips_bsel_v_h_RES
+  store <8 x i16> %7, ptr @llvm_mips_bsel_v_h_RES
   ret void
 }
 
@@ -476,15 +476,15 @@ entry:
 
 define void @llvm_mips_bsel_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG2
-  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG3
+  %0 = load <4 x i32>, ptr @llvm_mips_bsel_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_bsel_v_w_ARG2
+  %2 = load <4 x i32>, ptr @llvm_mips_bsel_v_w_ARG3
   %3 = bitcast <4 x i32> %0 to <16 x i8>
   %4 = bitcast <4 x i32> %1 to <16 x i8>
   %5 = bitcast <4 x i32> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <4 x i32>
-  store <4 x i32> %7, <4 x i32>* @llvm_mips_bsel_v_w_RES
+  store <4 x i32> %7, ptr @llvm_mips_bsel_v_w_RES
   ret void
 }
 
@@ -507,15 +507,15 @@ entry:
 
 define void @llvm_mips_bsel_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG2
-  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG3
+  %0 = load <2 x i64>, ptr @llvm_mips_bsel_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_bsel_v_d_ARG2
+  %2 = load <2 x i64>, ptr @llvm_mips_bsel_v_d_ARG3
   %3 = bitcast <2 x i64> %0 to <16 x i8>
   %4 = bitcast <2 x i64> %1 to <16 x i8>
   %5 = bitcast <2 x i64> %2 to <16 x i8>
   %6 = tail call <16 x i8> @llvm.mips.bsel.v(<16 x i8> %3, <16 x i8> %4, <16 x i8> %5)
   %7 = bitcast <16 x i8> %6 to <2 x i64>
-  store <2 x i64> %7, <2 x i64>* @llvm_mips_bsel_v_d_RES
+  store <2 x i64> %7, ptr @llvm_mips_bsel_v_d_RES
   ret void
 }
 
@@ -537,13 +537,13 @@ entry:
 
 define void @llvm_mips_nor_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_nor_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_nor_v_b_ARG2
   %2 = bitcast <16 x i8> %0 to <16 x i8>
   %3 = bitcast <16 x i8> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <16 x i8>
-  store <16 x i8> %5, <16 x i8>* @llvm_mips_nor_v_b_RES
+  store <16 x i8> %5, ptr @llvm_mips_nor_v_b_RES
   ret void
 }
 
@@ -560,13 +560,13 @@ entry:
 
 define void @llvm_mips_nor_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_nor_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_nor_v_h_ARG2
   %2 = bitcast <8 x i16> %0 to <16 x i8>
   %3 = bitcast <8 x i16> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <8 x i16>
-  store <8 x i16> %5, <8 x i16>* @llvm_mips_nor_v_h_RES
+  store <8 x i16> %5, ptr @llvm_mips_nor_v_h_RES
   ret void
 }
 
@@ -583,13 +583,13 @@ entry:
 
 define void @llvm_mips_nor_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_nor_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_nor_v_w_ARG2
   %2 = bitcast <4 x i32> %0 to <16 x i8>
   %3 = bitcast <4 x i32> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <4 x i32>
-  store <4 x i32> %5, <4 x i32>* @llvm_mips_nor_v_w_RES
+  store <4 x i32> %5, ptr @llvm_mips_nor_v_w_RES
   ret void
 }
 
@@ -606,13 +606,13 @@ entry:
 
 define void @llvm_mips_nor_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_nor_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_nor_v_d_ARG2
   %2 = bitcast <2 x i64> %0 to <16 x i8>
   %3 = bitcast <2 x i64> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <2 x i64>
-  store <2 x i64> %5, <2 x i64>* @llvm_mips_nor_v_d_RES
+  store <2 x i64> %5, ptr @llvm_mips_nor_v_d_RES
   ret void
 }
 
@@ -629,13 +629,13 @@ entry:
 
 define void @llvm_mips_or_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG2
   %2 = bitcast <16 x i8> %0 to <16 x i8>
   %3 = bitcast <16 x i8> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <16 x i8>
-  store <16 x i8> %5, <16 x i8>* @llvm_mips_or_v_b_RES
+  store <16 x i8> %5, ptr @llvm_mips_or_v_b_RES
   ret void
 }
 
@@ -652,13 +652,13 @@ entry:
 
 define void @llvm_mips_or_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG2
   %2 = bitcast <8 x i16> %0 to <16 x i8>
   %3 = bitcast <8 x i16> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <8 x i16>
-  store <8 x i16> %5, <8 x i16>* @llvm_mips_or_v_h_RES
+  store <8 x i16> %5, ptr @llvm_mips_or_v_h_RES
   ret void
 }
 
@@ -675,13 +675,13 @@ entry:
 
 define void @llvm_mips_or_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG2
   %2 = bitcast <4 x i32> %0 to <16 x i8>
   %3 = bitcast <4 x i32> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <4 x i32>
-  store <4 x i32> %5, <4 x i32>* @llvm_mips_or_v_w_RES
+  store <4 x i32> %5, ptr @llvm_mips_or_v_w_RES
   ret void
 }
 
@@ -698,13 +698,13 @@ entry:
 
 define void @llvm_mips_or_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG2
   %2 = bitcast <2 x i64> %0 to <16 x i8>
   %3 = bitcast <2 x i64> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <2 x i64>
-  store <2 x i64> %5, <2 x i64>* @llvm_mips_or_v_d_RES
+  store <2 x i64> %5, ptr @llvm_mips_or_v_d_RES
   ret void
 }
 
@@ -717,10 +717,10 @@ entry:
 ;
 define void @or_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_or_v_b_ARG2
   %2 = or <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_or_v_b_RES
   ret void
 }
 
@@ -733,10 +733,10 @@ entry:
 ;
 define void @or_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_or_v_h_ARG2
   %2 = or <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_or_v_h_RES
   ret void
 }
 
@@ -750,10 +750,10 @@ entry:
 
 define void @or_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_or_v_w_ARG2
   %2 = or <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_or_v_w_RES
   ret void
 }
 
@@ -767,10 +767,10 @@ entry:
 
 define void @or_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_or_v_d_ARG2
   %2 = or <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_or_v_d_RES
   ret void
 }
 
@@ -787,13 +787,13 @@ entry:
 
 define void @llvm_mips_xor_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG2
   %2 = bitcast <16 x i8> %0 to <16 x i8>
   %3 = bitcast <16 x i8> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <16 x i8>
-  store <16 x i8> %5, <16 x i8>* @llvm_mips_xor_v_b_RES
+  store <16 x i8> %5, ptr @llvm_mips_xor_v_b_RES
   ret void
 }
 
@@ -810,13 +810,13 @@ entry:
 
 define void @llvm_mips_xor_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG2
   %2 = bitcast <8 x i16> %0 to <16 x i8>
   %3 = bitcast <8 x i16> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <8 x i16>
-  store <8 x i16> %5, <8 x i16>* @llvm_mips_xor_v_h_RES
+  store <8 x i16> %5, ptr @llvm_mips_xor_v_h_RES
   ret void
 }
 
@@ -833,13 +833,13 @@ entry:
 
 define void @llvm_mips_xor_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG2
   %2 = bitcast <4 x i32> %0 to <16 x i8>
   %3 = bitcast <4 x i32> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <4 x i32>
-  store <4 x i32> %5, <4 x i32>* @llvm_mips_xor_v_w_RES
+  store <4 x i32> %5, ptr @llvm_mips_xor_v_w_RES
   ret void
 }
 
@@ -856,13 +856,13 @@ entry:
 
 define void @llvm_mips_xor_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG2
   %2 = bitcast <2 x i64> %0 to <16 x i8>
   %3 = bitcast <2 x i64> %1 to <16 x i8>
   %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3)
   %5 = bitcast <16 x i8> %4 to <2 x i64>
-  store <2 x i64> %5, <2 x i64>* @llvm_mips_xor_v_d_RES
+  store <2 x i64> %5, ptr @llvm_mips_xor_v_d_RES
   ret void
 }
 
@@ -875,10 +875,10 @@ entry:
 ;
 define void @xor_v_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1
-  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2
+  %0 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG1
+  %1 = load <16 x i8>, ptr @llvm_mips_xor_v_b_ARG2
   %2 = xor <16 x i8> %0, %1
-  store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES
+  store <16 x i8> %2, ptr @llvm_mips_xor_v_b_RES
   ret void
 }
 
@@ -891,10 +891,10 @@ entry:
 ;
 define void @xor_v_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1
-  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2
+  %0 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG1
+  %1 = load <8 x i16>, ptr @llvm_mips_xor_v_h_ARG2
   %2 = xor <8 x i16> %0, %1
-  store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES
+  store <8 x i16> %2, ptr @llvm_mips_xor_v_h_RES
   ret void
 }
 
@@ -908,10 +908,10 @@ entry:
 
 define void @xor_v_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1
-  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2
+  %0 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG1
+  %1 = load <4 x i32>, ptr @llvm_mips_xor_v_w_ARG2
   %2 = xor <4 x i32> %0, %1
-  store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES
+  store <4 x i32> %2, ptr @llvm_mips_xor_v_w_RES
   ret void
 }
 
@@ -925,10 +925,10 @@ entry:
 
 define void @xor_v_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1
-  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2
+  %0 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG1
+  %1 = load <2 x i64>, ptr @llvm_mips_xor_v_d_ARG2
   %2 = xor <2 x i64> %0, %1
-  store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES
+  store <2 x i64> %2, ptr @llvm_mips_xor_v_d_RES
   ret void
 }
 

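Note that the "%2 = bitcast <16 x i8> %0 to <16 x i8>" lines in vec.ll above
are value-level casts between vector types (degenerating to the identity for
the .b variants), not pointer casts. Opaque pointers only remove casts between
pointer types, so these survive, e.g. in llvm_mips_and_v_h_test:

  %0 = load <8 x i16>, ptr @llvm_mips_and_v_h_ARG1   ; pointer type rewritten
  %2 = bitcast <8 x i16> %0 to <16 x i8>             ; value cast, unchanged
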
diff  --git a/llvm/test/CodeGen/Mips/msa/vecs10.ll b/llvm/test/CodeGen/Mips/msa/vecs10.ll
index 9f6be4d47330e..ce61efc33f3a4 100644
--- a/llvm/test/CodeGen/Mips/msa/vecs10.ll
+++ b/llvm/test/CodeGen/Mips/msa/vecs10.ll
@@ -7,7 +7,7 @@
 
 define i32 @llvm_mips_bnz_v_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_v_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_bnz_v_ARG1
   %1 = tail call i32 @llvm.mips.bnz.v(<16 x i8> %0)
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %true, label %false
@@ -28,7 +28,7 @@ declare i32 @llvm.mips.bnz.v(<16 x i8>) nounwind
 
 define i32 @llvm_mips_bz_v_test() nounwind {
 entry:
-  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bz_v_ARG1
+  %0 = load <16 x i8>, ptr @llvm_mips_bz_v_ARG1
   %1 = tail call i32 @llvm.mips.bz.v(<16 x i8> %0)
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %true, label %false

diff  --git a/llvm/test/CodeGen/Mips/mul.ll b/llvm/test/CodeGen/Mips/mul.ll
index 9e053fc2e7d67..41ea550dbce7a 100644
--- a/llvm/test/CodeGen/Mips/mul.ll
+++ b/llvm/test/CodeGen/Mips/mul.ll
@@ -6,12 +6,12 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @iiii, align 4
-  %1 = load i32, i32* @jjjj, align 4
+  %0 = load i32, ptr @iiii, align 4
+  %1 = load i32, ptr @jjjj, align 4
   %mul = mul nsw i32 %1, %0
 ; 16:	mult	${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mflo	${{[0-9]+}}
 
-  store i32 %mul, i32* @kkkk, align 4
+  store i32 %mul, ptr @kkkk, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/mulll.ll b/llvm/test/CodeGen/Mips/mulll.ll
index 9a2acd4173815..08813809bd5f0 100644
--- a/llvm/test/CodeGen/Mips/mulll.ll
+++ b/llvm/test/CodeGen/Mips/mulll.ll
@@ -6,10 +6,10 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i64, i64* @iiii, align 8
-  %1 = load i64, i64* @jjjj, align 8
+  %0 = load i64, ptr @iiii, align 8
+  %1 = load i64, ptr @jjjj, align 8
   %mul = mul nsw i64 %1, %0
-  store i64 %mul, i64* @kkkk, align 8
+  store i64 %mul, ptr @kkkk, align 8
 ; 16:	multu	${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mfhi	${{[0-9]+}}
 ; 16:	mult	${{[0-9]+}}, ${{[0-9]+}}

diff  --git a/llvm/test/CodeGen/Mips/nacl-align.ll b/llvm/test/CodeGen/Mips/nacl-align.ll
index 74b6286648ddf..bca6c93de2624 100644
--- a/llvm/test/CodeGen/Mips/nacl-align.ll
+++ b/llvm/test/CodeGen/Mips/nacl-align.ll
@@ -61,14 +61,14 @@ default:
 
 ; This test checks that a block whose address is taken is bundle-aligned in NaCl.
 
- at bb_array = constant [2 x i8*] [i8* blockaddress(@test2, %bb1),
-                                i8* blockaddress(@test2, %bb2)], align 4
+ at bb_array = constant [2 x ptr] [ptr blockaddress(@test2, %bb1),
+                                ptr blockaddress(@test2, %bb2)], align 4
 
 define i32 @test2(i32 %i) {
 entry:
-  %elementptr = getelementptr inbounds [2 x i8*], [2 x i8*]* @bb_array, i32 0, i32 %i
-  %0 = load i8*, i8** %elementptr, align 4
-  indirectbr i8* %0, [label %bb1, label %bb2]
+  %elementptr = getelementptr inbounds [2 x ptr], ptr @bb_array, i32 0, i32 %i
+  %0 = load ptr, ptr %elementptr, align 4
+  indirectbr ptr %0, [label %bb1, label %bb2]
 
 bb1:
   ret i32 111

diff  --git a/llvm/test/CodeGen/Mips/nacl-branch-delay.ll b/llvm/test/CodeGen/Mips/nacl-branch-delay.ll
index b0d7e9c85d9f5..38348d8d49ec4 100644
--- a/llvm/test/CodeGen/Mips/nacl-branch-delay.ll
+++ b/llvm/test/CodeGen/Mips/nacl-branch-delay.ll
@@ -10,7 +10,7 @@ declare void @f2()
 
 
 define void @test1() {
-  %1 = load i32, i32* @x, align 4
+  %1 = load i32, ptr @x, align 4
   call void @f1(i32 %1)
   ret void
 
@@ -43,7 +43,7 @@ define void @test1() {
 
 
 define void @test2() {
-  store i32 1, i32* @x, align 4
+  store i32 1, ptr @x, align 4
   call void @f2()
   ret void
 

diff  --git a/llvm/test/CodeGen/Mips/nacl-reserved-regs.ll b/llvm/test/CodeGen/Mips/nacl-reserved-regs.ll
index efe2a663a3c5a..b3768b0887477 100644
--- a/llvm/test/CodeGen/Mips/nacl-reserved-regs.ll
+++ b/llvm/test/CodeGen/Mips/nacl-reserved-regs.ll
@@ -5,38 +5,38 @@
 @var = external global i32
 
 define void @f() {
-  %val1 = load volatile i32, i32* @var
-  %val2 = load volatile i32, i32* @var
-  %val3 = load volatile i32, i32* @var
-  %val4 = load volatile i32, i32* @var
-  %val5 = load volatile i32, i32* @var
-  %val6 = load volatile i32, i32* @var
-  %val7 = load volatile i32, i32* @var
-  %val8 = load volatile i32, i32* @var
-  %val9 = load volatile i32, i32* @var
-  %val10 = load volatile i32, i32* @var
-  %val11 = load volatile i32, i32* @var
-  %val12 = load volatile i32, i32* @var
-  %val13 = load volatile i32, i32* @var
-  %val14 = load volatile i32, i32* @var
-  %val15 = load volatile i32, i32* @var
-  %val16 = load volatile i32, i32* @var
-  store volatile i32 %val1, i32* @var
-  store volatile i32 %val2, i32* @var
-  store volatile i32 %val3, i32* @var
-  store volatile i32 %val4, i32* @var
-  store volatile i32 %val5, i32* @var
-  store volatile i32 %val6, i32* @var
-  store volatile i32 %val7, i32* @var
-  store volatile i32 %val8, i32* @var
-  store volatile i32 %val9, i32* @var
-  store volatile i32 %val10, i32* @var
-  store volatile i32 %val11, i32* @var
-  store volatile i32 %val12, i32* @var
-  store volatile i32 %val13, i32* @var
-  store volatile i32 %val14, i32* @var
-  store volatile i32 %val15, i32* @var
-  store volatile i32 %val16, i32* @var
+  %val1 = load volatile i32, ptr @var
+  %val2 = load volatile i32, ptr @var
+  %val3 = load volatile i32, ptr @var
+  %val4 = load volatile i32, ptr @var
+  %val5 = load volatile i32, ptr @var
+  %val6 = load volatile i32, ptr @var
+  %val7 = load volatile i32, ptr @var
+  %val8 = load volatile i32, ptr @var
+  %val9 = load volatile i32, ptr @var
+  %val10 = load volatile i32, ptr @var
+  %val11 = load volatile i32, ptr @var
+  %val12 = load volatile i32, ptr @var
+  %val13 = load volatile i32, ptr @var
+  %val14 = load volatile i32, ptr @var
+  %val15 = load volatile i32, ptr @var
+  %val16 = load volatile i32, ptr @var
+  store volatile i32 %val1, ptr @var
+  store volatile i32 %val2, ptr @var
+  store volatile i32 %val3, ptr @var
+  store volatile i32 %val4, ptr @var
+  store volatile i32 %val5, ptr @var
+  store volatile i32 %val6, ptr @var
+  store volatile i32 %val7, ptr @var
+  store volatile i32 %val8, ptr @var
+  store volatile i32 %val9, ptr @var
+  store volatile i32 %val10, ptr @var
+  store volatile i32 %val11, ptr @var
+  store volatile i32 %val12, ptr @var
+  store volatile i32 %val13, ptr @var
+  store volatile i32 %val14, ptr @var
+  store volatile i32 %val15, ptr @var
+  store volatile i32 %val16, ptr @var
   ret void
 
 ; Check that t6, t7 and t8 are used in non-NaCl code.

diff  --git a/llvm/test/CodeGen/Mips/neg1.ll b/llvm/test/CodeGen/Mips/neg1.ll
index dd5d7a09eb281..7f35a9884a10c 100644
--- a/llvm/test/CodeGen/Mips/neg1.ll
+++ b/llvm/test/CodeGen/Mips/neg1.ll
@@ -5,11 +5,11 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %sub = sub nsw i32 0, %0
 ; 16:	neg	${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %sub)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %sub)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
index 4892fb1b48dbf..a71a645a40174 100644
--- a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
+++ b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll
@@ -12,7 +12,7 @@ entry:
   ; Force the float into an odd-numbered register using named registers and
   ; load the vector.
   %b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
-  %0 = load volatile <4 x float>, <4 x float>* @v4f32
+  %0 = load volatile <4 x float>, ptr @v4f32
 
   ; Clobber all except $f12/$w12 and $f13
   ;
@@ -25,7 +25,7 @@ entry:
   ; vector.
   call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
   %1 = insertelement <4 x float> %0, float %b, i32 0
-  store <4 x float> %1, <4 x float>* @v4f32
+  store <4 x float> %1, ptr @v4f32
   ret void
 }
 
@@ -46,7 +46,7 @@ entry:
   ; Force the float into an odd-numbered register using named registers and
   ; load the vector.
   %b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a)
-  %0 = load volatile <4 x float>, <4 x float>* @v4f32
+  %0 = load volatile <4 x float>, ptr @v4f32
 
   ; Clobber all except $f12/$w12 and $f13
   ;
@@ -59,7 +59,7 @@ entry:
   ; vector.
   call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
   %1 = insertelement <4 x float> %0, float %b, i32 1
-  store <4 x float> %1, <4 x float>* @v4f32
+  store <4 x float> %1, ptr @v4f32
   ret void
 }
 
@@ -77,7 +77,7 @@ entry:
 
 define float @msa_extract_0() {
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* @v4f32
+  %0 = load volatile <4 x float>, ptr @v4f32
   %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)
 
   ; Clobber all except $f12, and $f13
@@ -105,7 +105,7 @@ entry:
 
 define float @msa_extract_1() {
 entry:
-  %0 = load volatile <4 x float>, <4 x float>* @v4f32
+  %0 = load volatile <4 x float>, ptr @v4f32
   %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0)
 
   ; Clobber all except $f13

diff  --git a/llvm/test/CodeGen/Mips/nomips16.ll b/llvm/test/CodeGen/Mips/nomips16.ll
index de668d663cd56..d05f37e39bf41 100644
--- a/llvm/test/CodeGen/Mips/nomips16.ll
+++ b/llvm/test/CodeGen/Mips/nomips16.ll
@@ -6,11 +6,11 @@
 ; Function Attrs: nounwind
 define void @foo() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %conv = fpext float %0 to double
   %add = fadd double %conv, 1.500000e+00
   %conv1 = fptrunc double %add to float
-  store float %conv1, float* @x, align 4
+  store float %conv1, ptr @x, align 4
   ret void
 }
 ; CHECK: 	.ent	foo
@@ -20,11 +20,11 @@ entry:
 ; Function Attrs: nounwind
 define void @nofoo() #1 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %conv = fpext float %0 to double
   %add = fadd double %conv, 3.900000e+00
   %conv1 = fptrunc double %add to float
-  store float %conv1, float* @x, align 4
+  store float %conv1, ptr @x, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/not1.ll b/llvm/test/CodeGen/Mips/not1.ll
index 5124805082729..30b6f45a3b218 100644
--- a/llvm/test/CodeGen/Mips/not1.ll
+++ b/llvm/test/CodeGen/Mips/not1.ll
@@ -6,11 +6,11 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @x, align 4
+  %0 = load i32, ptr @x, align 4
   %neg = xor i32 %0, -1
 ; 16:	not	${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %neg)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %neg)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/o32_cc_byval.ll b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
index de17a1e502f43..072a7aaee33b4 100644
--- a/llvm/test/CodeGen/Mips/o32_cc_byval.ll
+++ b/llvm/test/CodeGen/Mips/o32_cc_byval.ll
@@ -81,21 +81,20 @@ define void @f1() nounwind {
 ; CHECK-NEXT:    addiu $sp, $sp, 64
 entry:
   %agg.tmp10 = alloca %struct.S3, align 4
-  call void @callee1(float 2.000000e+01, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
-  call void @callee2(%struct.S2* byval(%struct.S2) @f1.s2) nounwind
-  %tmp11 = getelementptr inbounds %struct.S3, %struct.S3* %agg.tmp10, i32 0, i32 0
-  store i8 11, i8* %tmp11, align 4
-  call void @callee3(float 2.100000e+01, %struct.S3* byval(%struct.S3) %agg.tmp10, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
+  call void @callee1(float 2.000000e+01, ptr byval(%struct.S1) @f1.s1) nounwind
+  call void @callee2(ptr byval(%struct.S2) @f1.s2) nounwind
+  store i8 11, ptr %agg.tmp10, align 4
+  call void @callee3(float 2.100000e+01, ptr byval(%struct.S3) %agg.tmp10, ptr byval(%struct.S1) @f1.s1) nounwind
   ret void
 }
 
-declare void @callee1(float, %struct.S1* byval(%struct.S1))
+declare void @callee1(float, ptr byval(%struct.S1))
 
-declare void @callee2(%struct.S2* byval(%struct.S2))
+declare void @callee2(ptr byval(%struct.S2))
 
-declare void @callee3(float, %struct.S3* byval(%struct.S3), %struct.S1* byval(%struct.S1))
+declare void @callee3(float, ptr byval(%struct.S3), ptr byval(%struct.S1))
 
-define void @f2(float %f, %struct.S1* nocapture byval(%struct.S1) %s1) nounwind {
+define void @f2(float %f, ptr nocapture byval(%struct.S1) %s1) nounwind {
 ; CHECK-LABEL: f2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui $2, %hi(_gp_disp)
@@ -126,25 +125,24 @@ define void @f2(float %f, %struct.S1* nocapture byval(%struct.S1) %s1) nounwind
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    addiu $sp, $sp, 48
 entry:
-  %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5
-  %tmp = load i32, i32* %i2, align 4
-  %d = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 4
-  %tmp1 = load double, double* %d, align 8
-  %ll = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 3
-  %tmp2 = load i64, i64* %ll, align 8
-  %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
-  %tmp3 = load i32, i32* %i, align 4
-  %s = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1
-  %tmp4 = load i16, i16* %s, align 2
-  %c = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 0
-  %tmp5 = load i8, i8* %c, align 1
+  %i2 = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 5
+  %tmp = load i32, ptr %i2, align 4
+  %d = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 4
+  %tmp1 = load double, ptr %d, align 8
+  %ll = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 3
+  %tmp2 = load i64, ptr %ll, align 8
+  %i = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 2
+  %tmp3 = load i32, ptr %i, align 4
+  %s = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 1
+  %tmp4 = load i16, ptr %s, align 2
+  %tmp5 = load i8, ptr %s1, align 1
   tail call void @callee4(i32 %tmp, double %tmp1, i64 %tmp2, i32 %tmp3, i16 signext %tmp4, i8 signext %tmp5, float %f) nounwind
   ret void
 }
 
 declare void @callee4(i32, double, i64, i32, i16 signext, i8 signext, float)
 
-define void @f3(%struct.S2* nocapture byval(%struct.S2) %s2) nounwind {
+define void @f3(ptr nocapture byval(%struct.S2) %s2) nounwind {
 ; CHECK-LABEL: f3:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui $2, %hi(_gp_disp)
@@ -176,15 +174,14 @@ define void @f3(%struct.S2* nocapture byval(%struct.S2) %s2) nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    addiu $sp, $sp, 48
 entry:
-  %arrayidx = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 0
-  %tmp = load i32, i32* %arrayidx, align 4
-  %arrayidx2 = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 3
-  %tmp3 = load i32, i32* %arrayidx2, align 4
+  %tmp = load i32, ptr %s2, align 4
+  %arrayidx2 = getelementptr inbounds %struct.S2, ptr %s2, i32 0, i32 0, i32 3
+  %tmp3 = load i32, ptr %arrayidx2, align 4
   tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp3, i16 signext 4, i8 signext 5, float 6.000000e+00) nounwind
   ret void
 }
 
-define void @f4(float %f, %struct.S3* nocapture byval(%struct.S3) %s3, %struct.S1* nocapture byval(%struct.S1) %s1) nounwind {
+define void @f4(float %f, ptr nocapture byval(%struct.S3) %s3, ptr nocapture byval(%struct.S1) %s1) nounwind {
 ; CHECK-LABEL: f4:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui $2, %hi(_gp_disp)
@@ -218,19 +215,18 @@ define void @f4(float %f, %struct.S3* nocapture byval(%struct.S3) %s3, %struct.S
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    addiu $sp, $sp, 48
 entry:
-  %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
-  %tmp = load i32, i32* %i, align 4
-  %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5
-  %tmp1 = load i32, i32* %i2, align 4
-  %c = getelementptr inbounds %struct.S3, %struct.S3* %s3, i32 0, i32 0
-  %tmp2 = load i8, i8* %c, align 1
+  %i = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 2
+  %tmp = load i32, ptr %i, align 4
+  %i2 = getelementptr inbounds %struct.S1, ptr %s1, i32 0, i32 5
+  %tmp1 = load i32, ptr %i2, align 4
+  %tmp2 = load i8, ptr %s3, align 1
   tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp1, i16 signext 4, i8 signext %tmp2, float 6.000000e+00) nounwind
   ret void
 }
 
 %struct.S4 = type { [4 x i32] }
 
-define void @f5(i64 %a0, %struct.S4* nocapture byval(%struct.S4) %a1) nounwind {
+define void @f5(i64 %a0, ptr nocapture byval(%struct.S4) %a1) nounwind {
 ; CHECK-LABEL: f5:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui $2, %hi(_gp_disp)
@@ -252,8 +248,8 @@ define void @f5(i64 %a0, %struct.S4* nocapture byval(%struct.S4) %a1) nounwind {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    addiu $sp, $sp, 32
 entry:
-  tail call void @f6(%struct.S4* byval(%struct.S4) %a1, i64 %a0) nounwind
+  tail call void @f6(ptr byval(%struct.S4) %a1, i64 %a0) nounwind
   ret void
 }
 
-declare void @f6(%struct.S4* nocapture byval(%struct.S4), i64)
+declare void @f6(ptr nocapture byval(%struct.S4), i64)

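Two follow-on simplifications appear in o32_cc_byval.ll above and in
o32_cc_vararg.ll below. First, a getelementptr whose indices are all zero
computes the same address as its base pointer, so such loads fold onto the
base directly, as in @f4:

  ; before
  %c = getelementptr inbounds %struct.S3, %struct.S3* %s3, i32 0, i32 0
  %tmp2 = load i8, i8* %c, align 1

  ; after
  %tmp2 = load i8, ptr %s3, align 1

Second, the "bitcast i8** %ap to i8*" scaffolding around llvm.va_start and
llvm.va_end disappears entirely, since every pointer already has type ptr.
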
diff  --git a/llvm/test/CodeGen/Mips/o32_cc_vararg.ll b/llvm/test/CodeGen/Mips/o32_cc_vararg.ll
index 27d454f31d98c..750457eac5e92 100644
--- a/llvm/test/CodeGen/Mips/o32_cc_vararg.ll
+++ b/llvm/test/CodeGen/Mips/o32_cc_vararg.ll
@@ -8,23 +8,21 @@
 ; variable argument is returned from the correct stack location.
 
 
-declare void @llvm.va_start(i8*) nounwind
-declare void @llvm.va_end(i8*) nounwind
+declare void @llvm.va_start(ptr) nounwind
+declare void @llvm.va_end(ptr) nounwind
 
 ; return int
 define i32 @va1(i32 %a, ...) nounwind {
 entry:
   %a.addr = alloca i32, align 4
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %b = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, i32
-  store i32 %0, i32* %b, align 4
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load i32, i32* %b, align 4
+  store i32 %a, ptr %a.addr, align 4
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, i32
+  store i32 %0, ptr %b, align 4
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load i32, ptr %b, align 4
   ret i32 %tmp
 
 ; CHECK-LABEL: va1:
@@ -41,16 +39,14 @@ entry:
 define double @va2(i32 %a, ...) nounwind {
 entry:
   %a.addr = alloca i32, align 4
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %b = alloca double, align 8
-  store i32 %a, i32* %a.addr, align 4
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, double
-  store double %0, double* %b, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load double, double* %b, align 8
+  store i32 %a, ptr %a.addr, align 4
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, double
+  store double %0, ptr %b, align 8
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load double, ptr %b, align 8
   ret double %tmp
 
 ; CHECK-LABEL: va2:
@@ -69,16 +65,14 @@ entry:
 define i32 @va3(double %a, ...) nounwind {
 entry:
   %a.addr = alloca double, align 8
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %b = alloca i32, align 4
-  store double %a, double* %a.addr, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, i32
-  store i32 %0, i32* %b, align 4
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load i32, i32* %b, align 4
+  store double %a, ptr %a.addr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, i32
+  store i32 %0, ptr %b, align 4
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load i32, ptr %b, align 4
   ret i32 %tmp
 
 ; CHECK-LABEL: va3:
@@ -92,16 +86,14 @@ entry:
 define double @va4(double %a, ...) nounwind {
 entry:
   %a.addr = alloca double, align 8
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %b = alloca double, align 8
-  store double %a, double* %a.addr, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, double
-  store double %0, double* %b, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load double, double* %b, align 8
+  store double %a, ptr %a.addr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, double
+  store double %0, ptr %b, align 8
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load double, ptr %b, align 8
   ret double %tmp
 
 ; CHECK-LABEL: va4:
@@ -118,18 +110,16 @@ entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
   %c.addr = alloca i32, align 4
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %d = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
-  store i32 %c, i32* %c.addr, align 4
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, i32
-  store i32 %0, i32* %d, align 4
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load i32, i32* %d, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
+  store i32 %c, ptr %c.addr, align 4
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, i32
+  store i32 %0, ptr %d, align 4
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load i32, ptr %d, align 4
   ret i32 %tmp
 
 ; CHECK-LABEL: va5:
@@ -144,18 +134,16 @@ entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
   %c.addr = alloca i32, align 4
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %d = alloca double, align 8
-  store i32 %a, i32* %a.addr, align 4
-  store i32 %b, i32* %b.addr, align 4
-  store i32 %c, i32* %c.addr, align 4
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, double
-  store double %0, double* %d, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load double, double* %d, align 8
+  store i32 %a, ptr %a.addr, align 4
+  store i32 %b, ptr %b.addr, align 4
+  store i32 %c, ptr %c.addr, align 4
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, double
+  store double %0, ptr %d, align 8
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load double, ptr %d, align 8
   ret double %tmp
 
 ; CHECK-LABEL: va6:
@@ -173,17 +161,15 @@ define i32 @va7(i32 %a, double %b, ...) nounwind {
 entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca double, align 8
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %c = alloca i32, align 4
-  store i32 %a, i32* %a.addr, align 4
-  store double %b, double* %b.addr, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, i32
-  store i32 %0, i32* %c, align 4
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load i32, i32* %c, align 4
+  store i32 %a, ptr %a.addr, align 4
+  store double %b, ptr %b.addr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, i32
+  store i32 %0, ptr %c, align 4
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load i32, ptr %c, align 4
   ret i32 %tmp
 
 ; CHECK-LABEL: va7:
@@ -196,17 +182,15 @@ define double @va8(i32 %a, double %b, ...) nounwind {
 entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca double, align 8
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %c = alloca double, align 8
-  store i32 %a, i32* %a.addr, align 4
-  store double %b, double* %b.addr, align 8
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, double
-  store double %0, double* %c, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load double, double* %c, align 8
+  store i32 %a, ptr %a.addr, align 4
+  store double %b, ptr %b.addr, align 8
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, double
+  store double %0, ptr %c, align 8
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load double, ptr %c, align 8
   ret double %tmp
 
 ; CHECK-LABEL: va8:
@@ -221,18 +205,16 @@ entry:
   %a.addr = alloca double, align 8
   %b.addr = alloca double, align 8
   %c.addr = alloca i32, align 4
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %d = alloca i32, align 4
-  store double %a, double* %a.addr, align 8
-  store double %b, double* %b.addr, align 8
-  store i32 %c, i32* %c.addr, align 4
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, i32
-  store i32 %0, i32* %d, align 4
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load i32, i32* %d, align 4
+  store double %a, ptr %a.addr, align 8
+  store double %b, ptr %b.addr, align 8
+  store i32 %c, ptr %c.addr, align 4
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, i32
+  store i32 %0, ptr %d, align 4
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load i32, ptr %d, align 4
   ret i32 %tmp
 
 ; CHECK-LABEL: va9:
@@ -246,18 +228,16 @@ entry:
   %a.addr = alloca double, align 8
   %b.addr = alloca double, align 8
   %c.addr = alloca i32, align 4
-  %ap = alloca i8*, align 4
+  %ap = alloca ptr, align 4
   %d = alloca double, align 8
-  store double %a, double* %a.addr, align 8
-  store double %b, double* %b.addr, align 8
-  store i32 %c, i32* %c.addr, align 4
-  %ap1 = bitcast i8** %ap to i8*
-  call void @llvm.va_start(i8* %ap1)
-  %0 = va_arg i8** %ap, double
-  store double %0, double* %d, align 8
-  %ap2 = bitcast i8** %ap to i8*
-  call void @llvm.va_end(i8* %ap2)
-  %tmp = load double, double* %d, align 8
+  store double %a, ptr %a.addr, align 8
+  store double %b, ptr %b.addr, align 8
+  store i32 %c, ptr %c.addr, align 4
+  call void @llvm.va_start(ptr %ap)
+  %0 = va_arg ptr %ap, double
+  store double %0, ptr %d, align 8
+  call void @llvm.va_end(ptr %ap)
+  %tmp = load double, ptr %d, align 8
   ret double %tmp
 
 ; CHECK-LABEL: va10:
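
Every va* function in this file loses the same two instructions: under typed pointers the va_list slot was an i8** alloca that had to be bitcast to i8* before each llvm.va_start/llvm.va_end call, while with opaque pointers the alloca is already a plain ptr and the bitcasts fold away, which is why each hunk above shrinks by two lines. A standalone sketch of the converted shape (the function name is illustrative):

declare void @llvm.va_start(ptr) nounwind
declare void @llvm.va_end(ptr) nounwind

define i32 @first_vararg(i32 %n, ...) nounwind {
entry:
  %ap = alloca ptr, align 4          ; the va_list slot is just a ptr now
  call void @llvm.va_start(ptr %ap)  ; no bitcast of %ap is needed
  %v = va_arg ptr %ap, i32
  call void @llvm.va_end(ptr %ap)
  ret i32 %v
}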

diff --git a/llvm/test/CodeGen/Mips/octeon.ll b/llvm/test/CodeGen/Mips/octeon.ll
index eb16991604860..11e93736f5cf0 100644
--- a/llvm/test/CodeGen/Mips/octeon.ll
+++ b/llvm/test/CodeGen/Mips/octeon.ll
@@ -187,7 +187,7 @@ entry:
 ; OCTEON: bbit0 $1, 1, [[BB0:(\$|\.L)BB[0-9_]+]]
 ; OCTEON-PIC-NOT: b  {{[[:space:]].*}}
 ; OCTEON-NOT: j  {{[[:space:]].*}}
-  %0 = load i64, i64* @var, align 8
+  %0 = load i64, ptr @var, align 8
   %and = and i64 %0, 2
   %tobool = icmp eq i64 %and, 0
   br i1 %tobool, label %if.end, label %if.then
@@ -208,7 +208,7 @@ entry:
 ; OCTEON: bbit1 $1, 1, [[BB0:(\$|\.L)BB[0-9_]+]]
 ; OCTEON-PIC-NOT: b  {{[[:space:]].*}}
 ; OCTEON-NOT: j  {{[[:space:]].*}}
-  %0 = load i64, i64* @var, align 8
+  %0 = load i64, ptr @var, align 8
   %and = and i64 %0, 2
   %tobool = icmp eq i64 %and, 0
   br i1 %tobool, label %if.then, label %if.end

diff --git a/llvm/test/CodeGen/Mips/optimize-pic-o0.ll b/llvm/test/CodeGen/Mips/optimize-pic-o0.ll
index 8790b8e92b74e..8c23315d58d52 100644
--- a/llvm/test/CodeGen/Mips/optimize-pic-o0.ll
+++ b/llvm/test/CodeGen/Mips/optimize-pic-o0.ll
@@ -5,28 +5,28 @@ define i32 @main()  {
 entry:
   %retval = alloca i32, align 4
   %i = alloca i32, align 4
-  store i32 0, i32* %retval
-  store i32 0, i32* %i, align 4
+  store i32 0, ptr %retval
+  store i32 0, ptr %i, align 4
   br label %for.cond
 
 for.cond:                                         ; preds = %for.inc, %entry
-  %0 = load i32, i32* %i, align 4
+  %0 = load i32, ptr %i, align 4
   %cmp = icmp slt i32 %0, 10
   br i1 %cmp, label %for.body, label %for.end
 
 for.body:                                         ; preds = %for.cond
-  call void bitcast (void (...)* @foo to void ()*)()
+  call void @foo()
 ; CHECK: jalr $25
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body
-  %1 = load i32, i32* %i, align 4
+  %1 = load i32, ptr %i, align 4
   %inc = add nsw i32 %1, 1
-  store i32 %inc, i32* %i, align 4
+  store i32 %inc, ptr %i, align 4
   br label %for.cond
 
 for.end:                                          ; preds = %for.cond
-  %2 = load i32, i32* %retval
+  %2 = load i32, ptr %retval
   ret i32 %2
 }
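
The for.body hunk is the classic call-through-bitcast cleanup: @foo is declared void (...), so typed-pointer IR had to cast it to void ()* at the call site, whereas with opaque pointers the callee operand is just a ptr and the call instruction carries the function type itself. Sketch of the two forms (mirroring the test's @foo):

declare void @foo(...)

define void @caller() {
entry:
  ; was: call void bitcast (void (...)* @foo to void ()*)()
  call void @foo()
  ret void
}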
 

diff --git a/llvm/test/CodeGen/Mips/or1.ll b/llvm/test/CodeGen/Mips/or1.ll
index aabffd111c5e0..94e314491a7c8 100644
--- a/llvm/test/CodeGen/Mips/or1.ll
+++ b/llvm/test/CodeGen/Mips/or1.ll
@@ -6,12 +6,12 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @x, align 4
-  %1 = load i32, i32* @y, align 4
+  %0 = load i32, ptr @x, align 4
+  %1 = load i32, ptr @y, align 4
   %or = or i32 %0, %1
 ; 16:	or	${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %or)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %or)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)
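
The printf call shows the constant-expression counterpart of the same cleanup: a getelementptr of @.str at indices (0, 0) is the global itself, so the whole inbounds GEP constant folds away and the call takes @.str directly. Sketch, with illustrative string contents:

@.str = private constant [7 x i8] c"%i+%i\0A\00"

declare i32 @printf(ptr, ...)

define void @print_val(i32 %v) {
entry:
  ; was: getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0)
  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %v)
  ret void
}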

diff --git a/llvm/test/CodeGen/Mips/overflow-intrinsic-optimizations.ll b/llvm/test/CodeGen/Mips/overflow-intrinsic-optimizations.ll
index a0ac1db2e07df..4bbb9cccc84d7 100644
--- a/llvm/test/CodeGen/Mips/overflow-intrinsic-optimizations.ll
+++ b/llvm/test/CodeGen/Mips/overflow-intrinsic-optimizations.ll
@@ -1,6 +1,6 @@
 ; RUN: llc %s -mtriple=mipsel -o - | FileCheck %s
 
-define i1 @no__mulodi4(i32 %a, i64 %b, i32* %c) {
+define i1 @no__mulodi4(i32 %a, i64 %b, ptr %c) {
 ; CHECK-LABEL: no__mulodi4
 ; CHECK-NOT: jal __mulodi4
 ; CHECK-NOT: jal __multi3
@@ -13,7 +13,7 @@ entry:
   %5 = sext i32 %4 to i64
   %6 = icmp ne i64 %3, %5
   %7 = or i1 %2, %6
-  store i32 %4, i32* %c, align 4
+  store i32 %4, ptr %c, align 4
   ret i1 %7
 }
 

diff --git a/llvm/test/CodeGen/Mips/pr33682.ll b/llvm/test/CodeGen/Mips/pr33682.ll
index 0209ac39ea8bc..93ef4f900db07 100644
--- a/llvm/test/CodeGen/Mips/pr33682.ll
+++ b/llvm/test/CodeGen/Mips/pr33682.ll
@@ -7,9 +7,9 @@
 ; BE: lw $2, 4($4)
 ; LE: lw $2, 0($4)
 
-define i32 @a(<2 x i32> * %a) {
+define i32 @a(ptr %a) {
 entry:
-%0 = load <2 x i32>, <2 x i32> * %a
+%0 = load <2 x i32>, ptr %a
 %1 = bitcast <2 x i32> %0 to i64
 %2 = trunc i64 %1 to i32
 ret i32 %2
@@ -19,9 +19,9 @@ ret i32 %2
 ; BE: lw $2, 12($4)
 ; LE: lw $2, 0($4)
 
-define i32 @b(<4 x i32> * %a) {
+define i32 @b(ptr %a) {
 entry:
-%0 = load <4 x i32>, <4 x i32> * %a
+%0 = load <4 x i32>, ptr %a
 %1 = bitcast <4 x i32> %0 to i128
 %2 = trunc i128 %1 to i32
 ret i32 %2
@@ -34,9 +34,9 @@ ret i32 %2
 ; BE: lw $2, 0($4)
 ; LE: lw $2, 0($4)
 
-define i32 @c(i64 * %a) {
+define i32 @c(ptr %a) {
 entry:
-%0 = load i64, i64 * %a
+%0 = load i64, ptr %a
 %1 = bitcast i64 %0 to <2 x i32>
 %2 = extractelement <2 x i32> %1, i32 0
 ret i32 %2
@@ -46,9 +46,9 @@ ret i32 %2
 ; BE: lw $2, 4($4)
 ; LE: lw $2, 4($4)
 
-define i32 @d(i64 * %a) {
+define i32 @d(ptr %a) {
 entry:
-%0 = load i64, i64 * %a
+%0 = load i64, ptr %a
 %1 = bitcast i64 %0 to <2 x i32>
 %2 = extractelement <2 x i32> %1, i32 1
 ret i32 %2

diff --git a/llvm/test/CodeGen/Mips/pr33978.ll b/llvm/test/CodeGen/Mips/pr33978.ll
index c3d6ee51c6e56..921fa543cfda9 100644
--- a/llvm/test/CodeGen/Mips/pr33978.ll
+++ b/llvm/test/CodeGen/Mips/pr33978.ll
@@ -5,16 +5,15 @@
 ; This could result in one of the pointers being considered dereferenceable
 ; and other not.
 
-define void @foo(i8*) {
+define void @foo(ptr) {
 start:
   %a = alloca [22 x i8]
   %b = alloca [22 x i8]
-  %c = bitcast [22 x i8]* %a to i8*
-  %d = getelementptr inbounds [22 x i8], [22 x i8]* %b, i32 0, i32 2
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %c, i8* %d, i32 20, i1 false)
-  %e = getelementptr inbounds [22 x i8], [22 x i8]* %b, i32 0, i32 6
-  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %e, i32 12, i1 false)
+  %d = getelementptr inbounds [22 x i8], ptr %b, i32 0, i32 2
+  call void @llvm.memcpy.p0.p0.i32(ptr %a, ptr %d, i32 20, i1 false)
+  %e = getelementptr inbounds [22 x i8], ptr %b, i32 0, i32 6
+  call void @llvm.memcpy.p0.p0.i32(ptr %0, ptr %e, i32 12, i1 false)
   ret void
 }
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
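
Note the intrinsic rename that rides along with this conversion: overloaded memory intrinsics mangle their pointer parameters into the name, and once pointee types are gone the suffix encodes only the address space, so llvm.memcpy.p0i8.p0i8.i32 becomes llvm.memcpy.p0.p0.i32. Sketch with an illustrative wrapper:

declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)

define void @copy20(ptr %dst, ptr %src) {
entry:
  ; p0.p0 names the two address spaces; no pointee type appears anywhere
  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 20, i1 false)
  ret void
}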

diff --git a/llvm/test/CodeGen/Mips/pr34975.ll b/llvm/test/CodeGen/Mips/pr34975.ll
index b976e1213dd77..afac4c12c8046 100644
--- a/llvm/test/CodeGen/Mips/pr34975.ll
+++ b/llvm/test/CodeGen/Mips/pr34975.ll
@@ -8,7 +8,7 @@
 
 define void @e() !dbg !19 {
 entry:
-  %0 = load i32, i32* @c, align 4, !dbg !28, !tbaa !31
+  %0 = load i32, ptr @c, align 4, !dbg !28, !tbaa !31
   %tobool8 = icmp eq i32 %0, 0, !dbg !35
   br i1 %tobool8, label %for.end, label %for.body.preheader, !dbg !35
 
@@ -16,7 +16,7 @@ for.body.preheader:                               ; preds = %entry
   br label %for.body, !dbg !36
 
 for.body:                                         ; preds = %for.body.preheader
-  %1 = load i8, i8* undef, align 1, !dbg !36, !tbaa !38
+  %1 = load i8, ptr undef, align 1, !dbg !36, !tbaa !38
   %conv = zext i8 %1 to i32, !dbg !36
   %cmp = icmp sgt i32 %0, %conv, !dbg !39
   br i1 %cmp, label %if.end, label %if.then, !dbg !40
@@ -24,7 +24,7 @@ for.body:                                         ; preds = %for.body.preheader
 if.then:                                          ; preds = %for.body
   tail call void @llvm.dbg.value(metadata i32 %conv, metadata !41, metadata !DIExpression()), !dbg !43
   %idxprom5 = zext i8 %1 to i64, !dbg !44
-  %call = tail call i32 bitcast (i32 (...)* @g to i32 (i32)*)(i32 signext undef) #3, !dbg !45
+  %call = tail call i32 @g(i32 signext undef) #3, !dbg !45
   br label %if.end, !dbg !46
 
 if.end:                                           ; preds = %if.then, %for.body

diff --git a/llvm/test/CodeGen/Mips/pr35071.ll b/llvm/test/CodeGen/Mips/pr35071.ll
index 2681e1d3eade7..182ff5a4b0954 100644
--- a/llvm/test/CodeGen/Mips/pr35071.ll
+++ b/llvm/test/CodeGen/Mips/pr35071.ll
@@ -13,19 +13,19 @@ entry:
   br i1 %tobool, label %if.end, label %cleanup7.critedge, !dbg !21
 
 if.end:                                           ; preds = %entry
-  %call6 = call i32 bitcast (i32 (...)* @j to i32 (i32)*)(i32 signext %conv)
+  %call6 = call i32 @j(i32 signext %conv)
 #4, !dbg !22
   br label %cleanup7, !dbg !23
 
 cleanup7.critedge:                                ; preds = %entry
-  call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull undef) #4, !dbg !24
+  call void @llvm.lifetime.end.p0(i64 4, ptr nonnull undef) #4, !dbg !24
   br label %cleanup7
 
 cleanup7:                                         ; preds = %cleanup7.critedge,
   ret void
 }
 
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
 
 declare i32 @j(...)
 

diff --git a/llvm/test/CodeGen/Mips/pr42736.ll b/llvm/test/CodeGen/Mips/pr42736.ll
index c0dbadb507073..1c8dc114a3984 100644
--- a/llvm/test/CodeGen/Mips/pr42736.ll
+++ b/llvm/test/CodeGen/Mips/pr42736.ll
@@ -20,8 +20,8 @@ entry:
 ; STATIC-NEXT: sd      $[[R0]]
 
   %val = alloca i64, align 8
-  store i64 and (i64 ptrtoint (void ()* @foo to i64), i64 268435455), i64* %val, align 8
-  %0 = load i64, i64* %val, align 8
+  store i64 and (i64 ptrtoint (ptr @foo to i64), i64 268435455), ptr %val, align 8
+  %0 = load i64, ptr %val, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/prevent-hoisting.ll b/llvm/test/CodeGen/Mips/prevent-hoisting.ll
index 1fc7462811cb2..e44b895689b49 100644
--- a/llvm/test/CodeGen/Mips/prevent-hoisting.ll
+++ b/llvm/test/CodeGen/Mips/prevent-hoisting.ll
@@ -26,27 +26,27 @@
 ; CHECK:           lw      $[[R1]], %got(assignSE2partition)($[[R2]])
 
 
-%struct.img_par = type { i32, i32, i32, i32, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [16 x i16]], [6 x [32 x i32]], [16 x [16 x i32]], [4 x [12 x [4 x [4 x i32]]]], [16 x i32], i8**, i32*, i32***, i32**, i32, i32, i32, i32, %struct.Slice*, %struct.macroblock*, i32, i32, i32, i32, i32, i32, %struct.DecRefPicMarking_s*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32***, i32***, i32****, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x [2 x i32]], [3 x [2 x i32]], i32, i32, i32, i32, %struct.timeb, %struct.timeb, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
-%struct.Slice = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.datapartition*, %struct.MotionInfoContexts*, %struct.TextureInfoContexts*, i32, i32*, i32*, i32*, i32, i32*, i32*, i32*, i32 (%struct.img_par*, %struct.inp_par*)*, i32, i32, i32, i32 }
-%struct.datapartition = type { %struct.Bitstream*, %struct.DecodingEnvironment, i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)* }
-%struct.Bitstream = type { i32, i32, i32, i32, i8*, i32 }
-%struct.DecodingEnvironment = type { i32, i32, i32, i32, i32, i8*, i32* }
-%struct.syntaxelement = type { i32, i32, i32, i32, i32, i32, i32, i32, void (i32, i32, i32*, i32*)*, void (%struct.syntaxelement*, %struct.img_par*, %struct.DecodingEnvironment*)* }
+%struct.img_par = type { i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [16 x i16]], [6 x [32 x i32]], [16 x [16 x i32]], [4 x [12 x [4 x [4 x i32]]]], [16 x i32], ptr, ptr, ptr, ptr, i32, i32, i32, i32, ptr, ptr, i32, i32, i32, i32, i32, i32, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x [2 x i32]], [3 x [2 x i32]], i32, i32, i32, i32, %struct.timeb, %struct.timeb, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.Slice = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr, ptr, i32, ptr, ptr, ptr, i32, ptr, ptr, ptr, ptr, i32, i32, i32, i32 }
+%struct.datapartition = type { ptr, %struct.DecodingEnvironment, ptr }
+%struct.Bitstream = type { i32, i32, i32, i32, ptr, i32 }
+%struct.DecodingEnvironment = type { i32, i32, i32, i32, i32, ptr, ptr }
+%struct.syntaxelement = type { i32, i32, i32, i32, i32, i32, i32, i32, ptr, ptr }
 %struct.MotionInfoContexts = type { [4 x [11 x %struct.BiContextType]], [2 x [9 x %struct.BiContextType]], [2 x [10 x %struct.BiContextType]], [2 x [6 x %struct.BiContextType]], [4 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x %struct.BiContextType] }
 %struct.BiContextType = type { i16, i8 }
 %struct.TextureInfoContexts = type { [2 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x [4 x %struct.BiContextType]], [10 x [4 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]] }
 %struct.inp_par = type { [1000 x i8], [1000 x i8], [1000 x i8], i32, i32, i32, i32, i32, i32, i32, i32 }
-%struct.macroblock = type { i32, [2 x i32], i32, i32, %struct.macroblock*, %struct.macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], i32, i64, i64, i32, i32, [4 x i8], [4 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
-%struct.DecRefPicMarking_s = type { i32, i32, i32, i32, i32, %struct.DecRefPicMarking_s* }
+%struct.macroblock = type { i32, [2 x i32], i32, i32, ptr, ptr, i32, [2 x [4 x [4 x [2 x i32]]]], i32, i64, i64, i32, i32, [4 x i8], [4 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+%struct.DecRefPicMarking_s = type { i32, i32, i32, i32, i32, ptr }
 %struct.timeb = type { i32, i16, i16, i16 }
 
 @assignSE2partition = external global [0 x [20 x i32]]
 @FIELD_SCAN8x8 = external constant [64 x [2 x i8]]
 
 
-define void @readLumaCoeff8x8_CABAC(%struct.img_par* %img, i32 %b8) {
+define void @readLumaCoeff8x8_CABAC(ptr %img, i32 %b8) {
 
-  %1 = load i32, i32* undef, align 4
+  %1 = load i32, ptr undef, align 4
   br i1 false, label %2, label %3
 
 ; <label>:2                                       ; preds = %0
@@ -65,7 +65,7 @@ switch.lookup:                                    ; preds = %3
   br label %6
 
 ; <label>:6                                       ; preds = %5, %4
-  %7 = phi [2 x i8]* [ getelementptr inbounds ([64 x [2 x i8]], [64 x [2 x i8]]* @FIELD_SCAN8x8, i32 0, i32 0), %4 ], [ null, %5 ]
+  %7 = phi ptr [ @FIELD_SCAN8x8, %4 ], [ null, %5 ]
   br i1 undef, label %switch.lookup6, label %8
 
 switch.lookup6:                                   ; preds = %6
@@ -77,7 +77,7 @@ switch.lookup6:                                   ; preds = %6
 ; <label>:9                                       ; preds = %8
   %10 = and i32 %b8, 1
   %11 = shl nuw nsw i32 %10, 3
-  %12 = getelementptr inbounds %struct.Slice, %struct.Slice* null, i32 0, i32 9
+  %12 = getelementptr inbounds %struct.Slice, ptr null, i32 0, i32 9
   br i1 undef, label %.preheader, label %.preheader11
 
 .preheader11:                                     ; preds = %21, %9
@@ -92,20 +92,20 @@ switch.lookup6:                                   ; preds = %6
   br label %15
 
 ; <label>:15                                      ; preds = %14, %13
-  %16 = getelementptr inbounds [0 x [20 x i32]], [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
-  %17 = load i32, i32* %16, align 4
-  %18 = getelementptr inbounds %struct.datapartition, %struct.datapartition* null, i32 %17, i32 2
-  %19 = load i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)*, i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)** %18, align 4
-  %20 = call i32 %19(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* undef)
+  %16 = getelementptr inbounds [0 x [20 x i32]], ptr @assignSE2partition, i32 0, i32 %1, i32 undef
+  %17 = load i32, ptr %16, align 4
+  %18 = getelementptr inbounds %struct.datapartition, ptr null, i32 %17, i32 2
+  %19 = load ptr, ptr %18, align 4
+  %20 = call i32 %19(ptr undef, ptr %img, ptr undef)
   br i1 false, label %.loopexit, label %21
 
 ; <label>:21                                      ; preds = %15
   %22 = add i32 %coef_ctr.013, 1
   %23 = add i32 %22, 0
-  %24 = getelementptr inbounds [2 x i8], [2 x i8]* %7, i32 %23, i32 0
+  %24 = getelementptr inbounds [2 x i8], ptr %7, i32 %23, i32 0
   %25 = add nsw i32 0, %11
-  %26 = getelementptr inbounds %struct.img_par, %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %25
-  store i32 0, i32* %26, align 4
+  %26 = getelementptr inbounds %struct.img_par, ptr %img, i32 0, i32 27, i32 undef, i32 %25
+  store i32 0, ptr %26, align 4
   %27 = add nsw i32 %k.014, 1
   %28 = icmp slt i32 %27, 65
   br i1 %28, label %.preheader11, label %.loopexit
@@ -122,22 +122,22 @@ switch.lookup6:                                   ; preds = %6
   br label %31
 
 ; <label>:31                                      ; preds = %30, %29
-  %32 = getelementptr inbounds [0 x [20 x i32]], [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
-  %33 = load i32, i32* %32, align 4
-  %34 = getelementptr inbounds %struct.datapartition, %struct.datapartition* null, i32 %33
-  %35 = call i32 undef(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* %34)
+  %32 = getelementptr inbounds [0 x [20 x i32]], ptr @assignSE2partition, i32 0, i32 %1, i32 undef
+  %33 = load i32, ptr %32, align 4
+  %34 = getelementptr inbounds %struct.datapartition, ptr null, i32 %33
+  %35 = call i32 undef(ptr undef, ptr %img, ptr %34)
   br i1 false, label %.loopexit, label %36
 
 ; <label>:36                                      ; preds = %31
-  %37 = load i32, i32* undef, align 4
+  %37 = load i32, ptr undef, align 4
   %38 = add i32 %coef_ctr.29, 1
   %39 = add i32 %38, %37
-  %40 = getelementptr inbounds [2 x i8], [2 x i8]* %7, i32 %39, i32 0
-  %41 = load i8, i8* %40, align 1
+  %40 = getelementptr inbounds [2 x i8], ptr %7, i32 %39, i32 0
+  %41 = load i8, ptr %40, align 1
   %42 = zext i8 %41 to i32
   %43 = add nsw i32 %42, %11
-  %44 = getelementptr inbounds %struct.img_par, %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %43
-  store i32 0, i32* %44, align 4
+  %44 = getelementptr inbounds %struct.img_par, ptr %img, i32 0, i32 27, i32 undef, i32 %43
+  store i32 0, ptr %44, align 4
   %45 = add nsw i32 %k.110, 1
   %46 = icmp slt i32 %45, 65
   br i1 %46, label %.preheader, label %.loopexit
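
This file concentrates the aggregate-type changes: every pointer-typed struct field collapses to ptr, and an indirect call no longer spells out the pointed-to function type, because the load produces an opaque ptr and the call site supplies the function type on its own. A reduced sketch of that pattern, with hypothetical type and function names:

%struct.handler = type { i32, ptr }   ; the callback field is now a bare ptr

define i32 @dispatch(ptr %h) {
entry:
  %slot = getelementptr inbounds %struct.handler, ptr %h, i32 0, i32 1
  %fp = load ptr, ptr %slot           ; loads an opaque function pointer
  %r = call i32 %fp(i32 7)            ; the call supplies the i32 (i32) type
  ret i32 %r
}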

diff --git a/llvm/test/CodeGen/Mips/private-addr.ll b/llvm/test/CodeGen/Mips/private-addr.ll
index 37dd6fe53c40b..d5e426274666d 100644
--- a/llvm/test/CodeGen/Mips/private-addr.ll
+++ b/llvm/test/CodeGen/Mips/private-addr.ll
@@ -4,11 +4,11 @@ define private void @bar() {
   ret void
 }
 
-define void()* @foo() {
+define ptr @foo() {
 ; CHECK:      foo:
 ; CHECK:      lw     $[[REG:.*]], %got($bar)($1)
 ; CHECK-NEXT: jr     $ra
 ; CHECK-NEXT: addiu  $2, $[[REG]], %lo($bar)
 
-  ret void()* @bar
+  ret ptr @bar
 }

diff --git a/llvm/test/CodeGen/Mips/private.ll b/llvm/test/CodeGen/Mips/private.ll
index 07affbf30c38d..e93c2d7fc5d9e 100644
--- a/llvm/test/CodeGen/Mips/private.ll
+++ b/llvm/test/CodeGen/Mips/private.ll
@@ -15,6 +15,6 @@ define i32 @bar() {
 ; CHECK: lw $[[R0:[0-9]+]], %got($baz)($
 ; CHECK: lw ${{[0-9]+}}, %lo($baz)($[[R0]])
   call void @foo()
-  %1 = load i32, i32* @baz, align 4
+  %1 = load i32, ptr @baz, align 4
   ret i32 %1
 }

diff --git a/llvm/test/CodeGen/Mips/ra-allocatable.ll b/llvm/test/CodeGen/Mips/ra-allocatable.ll
index 048d4325a4114..dcbf90393576a 100644
--- a/llvm/test/CodeGen/Mips/ra-allocatable.ll
+++ b/llvm/test/CodeGen/Mips/ra-allocatable.ll
@@ -1,95 +1,95 @@
 ; RUN: llc  < %s -march=mipsel | FileCheck %s
 
 @a0 = external global i32
-@b0 = external global i32*
+@b0 = external global ptr
 @a1 = external global i32
-@b1 = external global i32*
+@b1 = external global ptr
 @a2 = external global i32
-@b2 = external global i32*
+@b2 = external global ptr
 @a3 = external global i32
-@b3 = external global i32*
+@b3 = external global ptr
 @a4 = external global i32
-@b4 = external global i32*
+@b4 = external global ptr
 @a5 = external global i32
-@b5 = external global i32*
+@b5 = external global ptr
 @a6 = external global i32
-@b6 = external global i32*
+@b6 = external global ptr
 @a7 = external global i32
-@b7 = external global i32*
+@b7 = external global ptr
 @a8 = external global i32
-@b8 = external global i32*
+@b8 = external global ptr
 @a9 = external global i32
-@b9 = external global i32*
+@b9 = external global ptr
 @a10 = external global i32
-@b10 = external global i32*
+@b10 = external global ptr
 @a11 = external global i32
-@b11 = external global i32*
+@b11 = external global ptr
 @a12 = external global i32
-@b12 = external global i32*
+@b12 = external global ptr
 @a13 = external global i32
-@b13 = external global i32*
+@b13 = external global ptr
 @a14 = external global i32
-@b14 = external global i32*
+@b14 = external global ptr
 @a15 = external global i32
-@b15 = external global i32*
+@b15 = external global ptr
 @a16 = external global i32
-@b16 = external global i32*
+@b16 = external global ptr
 @a17 = external global i32
-@b17 = external global i32*
+@b17 = external global ptr
 @a18 = external global i32
-@b18 = external global i32*
+@b18 = external global ptr
 @a19 = external global i32
-@b19 = external global i32*
+@b19 = external global ptr
 @a20 = external global i32
-@b20 = external global i32*
+@b20 = external global ptr
 @a21 = external global i32
-@b21 = external global i32*
+@b21 = external global ptr
 @a22 = external global i32
-@b22 = external global i32*
+@b22 = external global ptr
 @a23 = external global i32
-@b23 = external global i32*
+@b23 = external global ptr
 @a24 = external global i32
-@b24 = external global i32*
+@b24 = external global ptr
 @a25 = external global i32
-@b25 = external global i32*
+@b25 = external global ptr
 @a26 = external global i32
-@b26 = external global i32*
+@b26 = external global ptr
 @a27 = external global i32
-@b27 = external global i32*
+@b27 = external global ptr
 @a28 = external global i32
-@b28 = external global i32*
+@b28 = external global ptr
 @a29 = external global i32
-@b29 = external global i32*
-@c0 = external global i32*
-@c1 = external global i32*
-@c2 = external global i32*
-@c3 = external global i32*
-@c4 = external global i32*
-@c5 = external global i32*
-@c6 = external global i32*
-@c7 = external global i32*
-@c8 = external global i32*
-@c9 = external global i32*
-@c10 = external global i32*
-@c11 = external global i32*
-@c12 = external global i32*
-@c13 = external global i32*
-@c14 = external global i32*
-@c15 = external global i32*
-@c16 = external global i32*
-@c17 = external global i32*
-@c18 = external global i32*
-@c19 = external global i32*
-@c20 = external global i32*
-@c21 = external global i32*
-@c22 = external global i32*
-@c23 = external global i32*
-@c24 = external global i32*
-@c25 = external global i32*
-@c26 = external global i32*
-@c27 = external global i32*
-@c28 = external global i32*
-@c29 = external global i32*
+@b29 = external global ptr
+@c0 = external global ptr
+@c1 = external global ptr
+@c2 = external global ptr
+@c3 = external global ptr
+@c4 = external global ptr
+@c5 = external global ptr
+@c6 = external global ptr
+@c7 = external global ptr
+@c8 = external global ptr
+@c9 = external global ptr
+@c10 = external global ptr
+@c11 = external global ptr
+@c12 = external global ptr
+@c13 = external global ptr
+@c14 = external global ptr
+@c15 = external global ptr
+@c16 = external global ptr
+@c17 = external global ptr
+@c18 = external global ptr
+@c19 = external global ptr
+@c20 = external global ptr
+@c21 = external global ptr
+@c22 = external global ptr
+@c23 = external global ptr
+@c24 = external global ptr
+@c25 = external global ptr
+@c26 = external global ptr
+@c27 = external global ptr
+@c28 = external global ptr
+@c29 = external global ptr
 
 define i32 @f1() nounwind {
 entry:
@@ -98,186 +98,186 @@ entry:
 ; CHECK: lw  $ra, {{[0-9]+}}($sp)            # 4-byte Folded Reload
 ; CHECK: jr  $ra
 
-  %0 = load i32, i32* @a0, align 4
-  %1 = load i32*, i32** @b0, align 4
-  store i32 %0, i32* %1, align 4
-  %2 = load i32, i32* @a1, align 4
-  %3 = load i32*, i32** @b1, align 4
-  store i32 %2, i32* %3, align 4
-  %4 = load i32, i32* @a2, align 4
-  %5 = load i32*, i32** @b2, align 4
-  store i32 %4, i32* %5, align 4
-  %6 = load i32, i32* @a3, align 4
-  %7 = load i32*, i32** @b3, align 4
-  store i32 %6, i32* %7, align 4
-  %8 = load i32, i32* @a4, align 4
-  %9 = load i32*, i32** @b4, align 4
-  store i32 %8, i32* %9, align 4
-  %10 = load i32, i32* @a5, align 4
-  %11 = load i32*, i32** @b5, align 4
-  store i32 %10, i32* %11, align 4
-  %12 = load i32, i32* @a6, align 4
-  %13 = load i32*, i32** @b6, align 4
-  store i32 %12, i32* %13, align 4
-  %14 = load i32, i32* @a7, align 4
-  %15 = load i32*, i32** @b7, align 4
-  store i32 %14, i32* %15, align 4
-  %16 = load i32, i32* @a8, align 4
-  %17 = load i32*, i32** @b8, align 4
-  store i32 %16, i32* %17, align 4
-  %18 = load i32, i32* @a9, align 4
-  %19 = load i32*, i32** @b9, align 4
-  store i32 %18, i32* %19, align 4
-  %20 = load i32, i32* @a10, align 4
-  %21 = load i32*, i32** @b10, align 4
-  store i32 %20, i32* %21, align 4
-  %22 = load i32, i32* @a11, align 4
-  %23 = load i32*, i32** @b11, align 4
-  store i32 %22, i32* %23, align 4
-  %24 = load i32, i32* @a12, align 4
-  %25 = load i32*, i32** @b12, align 4
-  store i32 %24, i32* %25, align 4
-  %26 = load i32, i32* @a13, align 4
-  %27 = load i32*, i32** @b13, align 4
-  store i32 %26, i32* %27, align 4
-  %28 = load i32, i32* @a14, align 4
-  %29 = load i32*, i32** @b14, align 4
-  store i32 %28, i32* %29, align 4
-  %30 = load i32, i32* @a15, align 4
-  %31 = load i32*, i32** @b15, align 4
-  store i32 %30, i32* %31, align 4
-  %32 = load i32, i32* @a16, align 4
-  %33 = load i32*, i32** @b16, align 4
-  store i32 %32, i32* %33, align 4
-  %34 = load i32, i32* @a17, align 4
-  %35 = load i32*, i32** @b17, align 4
-  store i32 %34, i32* %35, align 4
-  %36 = load i32, i32* @a18, align 4
-  %37 = load i32*, i32** @b18, align 4
-  store i32 %36, i32* %37, align 4
-  %38 = load i32, i32* @a19, align 4
-  %39 = load i32*, i32** @b19, align 4
-  store i32 %38, i32* %39, align 4
-  %40 = load i32, i32* @a20, align 4
-  %41 = load i32*, i32** @b20, align 4
-  store i32 %40, i32* %41, align 4
-  %42 = load i32, i32* @a21, align 4
-  %43 = load i32*, i32** @b21, align 4
-  store i32 %42, i32* %43, align 4
-  %44 = load i32, i32* @a22, align 4
-  %45 = load i32*, i32** @b22, align 4
-  store i32 %44, i32* %45, align 4
-  %46 = load i32, i32* @a23, align 4
-  %47 = load i32*, i32** @b23, align 4
-  store i32 %46, i32* %47, align 4
-  %48 = load i32, i32* @a24, align 4
-  %49 = load i32*, i32** @b24, align 4
-  store i32 %48, i32* %49, align 4
-  %50 = load i32, i32* @a25, align 4
-  %51 = load i32*, i32** @b25, align 4
-  store i32 %50, i32* %51, align 4
-  %52 = load i32, i32* @a26, align 4
-  %53 = load i32*, i32** @b26, align 4
-  store i32 %52, i32* %53, align 4
-  %54 = load i32, i32* @a27, align 4
-  %55 = load i32*, i32** @b27, align 4
-  store i32 %54, i32* %55, align 4
-  %56 = load i32, i32* @a28, align 4
-  %57 = load i32*, i32** @b28, align 4
-  store i32 %56, i32* %57, align 4
-  %58 = load i32, i32* @a29, align 4
-  %59 = load i32*, i32** @b29, align 4
-  store i32 %58, i32* %59, align 4
-  %60 = load i32, i32* @a0, align 4
-  %61 = load i32*, i32** @c0, align 4
-  store i32 %60, i32* %61, align 4
-  %62 = load i32, i32* @a1, align 4
-  %63 = load i32*, i32** @c1, align 4
-  store i32 %62, i32* %63, align 4
-  %64 = load i32, i32* @a2, align 4
-  %65 = load i32*, i32** @c2, align 4
-  store i32 %64, i32* %65, align 4
-  %66 = load i32, i32* @a3, align 4
-  %67 = load i32*, i32** @c3, align 4
-  store i32 %66, i32* %67, align 4
-  %68 = load i32, i32* @a4, align 4
-  %69 = load i32*, i32** @c4, align 4
-  store i32 %68, i32* %69, align 4
-  %70 = load i32, i32* @a5, align 4
-  %71 = load i32*, i32** @c5, align 4
-  store i32 %70, i32* %71, align 4
-  %72 = load i32, i32* @a6, align 4
-  %73 = load i32*, i32** @c6, align 4
-  store i32 %72, i32* %73, align 4
-  %74 = load i32, i32* @a7, align 4
-  %75 = load i32*, i32** @c7, align 4
-  store i32 %74, i32* %75, align 4
-  %76 = load i32, i32* @a8, align 4
-  %77 = load i32*, i32** @c8, align 4
-  store i32 %76, i32* %77, align 4
-  %78 = load i32, i32* @a9, align 4
-  %79 = load i32*, i32** @c9, align 4
-  store i32 %78, i32* %79, align 4
-  %80 = load i32, i32* @a10, align 4
-  %81 = load i32*, i32** @c10, align 4
-  store i32 %80, i32* %81, align 4
-  %82 = load i32, i32* @a11, align 4
-  %83 = load i32*, i32** @c11, align 4
-  store i32 %82, i32* %83, align 4
-  %84 = load i32, i32* @a12, align 4
-  %85 = load i32*, i32** @c12, align 4
-  store i32 %84, i32* %85, align 4
-  %86 = load i32, i32* @a13, align 4
-  %87 = load i32*, i32** @c13, align 4
-  store i32 %86, i32* %87, align 4
-  %88 = load i32, i32* @a14, align 4
-  %89 = load i32*, i32** @c14, align 4
-  store i32 %88, i32* %89, align 4
-  %90 = load i32, i32* @a15, align 4
-  %91 = load i32*, i32** @c15, align 4
-  store i32 %90, i32* %91, align 4
-  %92 = load i32, i32* @a16, align 4
-  %93 = load i32*, i32** @c16, align 4
-  store i32 %92, i32* %93, align 4
-  %94 = load i32, i32* @a17, align 4
-  %95 = load i32*, i32** @c17, align 4
-  store i32 %94, i32* %95, align 4
-  %96 = load i32, i32* @a18, align 4
-  %97 = load i32*, i32** @c18, align 4
-  store i32 %96, i32* %97, align 4
-  %98 = load i32, i32* @a19, align 4
-  %99 = load i32*, i32** @c19, align 4
-  store i32 %98, i32* %99, align 4
-  %100 = load i32, i32* @a20, align 4
-  %101 = load i32*, i32** @c20, align 4
-  store i32 %100, i32* %101, align 4
-  %102 = load i32, i32* @a21, align 4
-  %103 = load i32*, i32** @c21, align 4
-  store i32 %102, i32* %103, align 4
-  %104 = load i32, i32* @a22, align 4
-  %105 = load i32*, i32** @c22, align 4
-  store i32 %104, i32* %105, align 4
-  %106 = load i32, i32* @a23, align 4
-  %107 = load i32*, i32** @c23, align 4
-  store i32 %106, i32* %107, align 4
-  %108 = load i32, i32* @a24, align 4
-  %109 = load i32*, i32** @c24, align 4
-  store i32 %108, i32* %109, align 4
-  %110 = load i32, i32* @a25, align 4
-  %111 = load i32*, i32** @c25, align 4
-  store i32 %110, i32* %111, align 4
-  %112 = load i32, i32* @a26, align 4
-  %113 = load i32*, i32** @c26, align 4
-  store i32 %112, i32* %113, align 4
-  %114 = load i32, i32* @a27, align 4
-  %115 = load i32*, i32** @c27, align 4
-  store i32 %114, i32* %115, align 4
-  %116 = load i32, i32* @a28, align 4
-  %117 = load i32*, i32** @c28, align 4
-  store i32 %116, i32* %117, align 4
-  %118 = load i32, i32* @a29, align 4
-  %119 = load i32*, i32** @c29, align 4
-  store i32 %118, i32* %119, align 4
-  %120 = load i32, i32* @a0, align 4
+  %0 = load i32, ptr @a0, align 4
+  %1 = load ptr, ptr @b0, align 4
+  store i32 %0, ptr %1, align 4
+  %2 = load i32, ptr @a1, align 4
+  %3 = load ptr, ptr @b1, align 4
+  store i32 %2, ptr %3, align 4
+  %4 = load i32, ptr @a2, align 4
+  %5 = load ptr, ptr @b2, align 4
+  store i32 %4, ptr %5, align 4
+  %6 = load i32, ptr @a3, align 4
+  %7 = load ptr, ptr @b3, align 4
+  store i32 %6, ptr %7, align 4
+  %8 = load i32, ptr @a4, align 4
+  %9 = load ptr, ptr @b4, align 4
+  store i32 %8, ptr %9, align 4
+  %10 = load i32, ptr @a5, align 4
+  %11 = load ptr, ptr @b5, align 4
+  store i32 %10, ptr %11, align 4
+  %12 = load i32, ptr @a6, align 4
+  %13 = load ptr, ptr @b6, align 4
+  store i32 %12, ptr %13, align 4
+  %14 = load i32, ptr @a7, align 4
+  %15 = load ptr, ptr @b7, align 4
+  store i32 %14, ptr %15, align 4
+  %16 = load i32, ptr @a8, align 4
+  %17 = load ptr, ptr @b8, align 4
+  store i32 %16, ptr %17, align 4
+  %18 = load i32, ptr @a9, align 4
+  %19 = load ptr, ptr @b9, align 4
+  store i32 %18, ptr %19, align 4
+  %20 = load i32, ptr @a10, align 4
+  %21 = load ptr, ptr @b10, align 4
+  store i32 %20, ptr %21, align 4
+  %22 = load i32, ptr @a11, align 4
+  %23 = load ptr, ptr @b11, align 4
+  store i32 %22, ptr %23, align 4
+  %24 = load i32, ptr @a12, align 4
+  %25 = load ptr, ptr @b12, align 4
+  store i32 %24, ptr %25, align 4
+  %26 = load i32, ptr @a13, align 4
+  %27 = load ptr, ptr @b13, align 4
+  store i32 %26, ptr %27, align 4
+  %28 = load i32, ptr @a14, align 4
+  %29 = load ptr, ptr @b14, align 4
+  store i32 %28, ptr %29, align 4
+  %30 = load i32, ptr @a15, align 4
+  %31 = load ptr, ptr @b15, align 4
+  store i32 %30, ptr %31, align 4
+  %32 = load i32, ptr @a16, align 4
+  %33 = load ptr, ptr @b16, align 4
+  store i32 %32, ptr %33, align 4
+  %34 = load i32, ptr @a17, align 4
+  %35 = load ptr, ptr @b17, align 4
+  store i32 %34, ptr %35, align 4
+  %36 = load i32, ptr @a18, align 4
+  %37 = load ptr, ptr @b18, align 4
+  store i32 %36, ptr %37, align 4
+  %38 = load i32, ptr @a19, align 4
+  %39 = load ptr, ptr @b19, align 4
+  store i32 %38, ptr %39, align 4
+  %40 = load i32, ptr @a20, align 4
+  %41 = load ptr, ptr @b20, align 4
+  store i32 %40, ptr %41, align 4
+  %42 = load i32, ptr @a21, align 4
+  %43 = load ptr, ptr @b21, align 4
+  store i32 %42, ptr %43, align 4
+  %44 = load i32, ptr @a22, align 4
+  %45 = load ptr, ptr @b22, align 4
+  store i32 %44, ptr %45, align 4
+  %46 = load i32, ptr @a23, align 4
+  %47 = load ptr, ptr @b23, align 4
+  store i32 %46, ptr %47, align 4
+  %48 = load i32, ptr @a24, align 4
+  %49 = load ptr, ptr @b24, align 4
+  store i32 %48, ptr %49, align 4
+  %50 = load i32, ptr @a25, align 4
+  %51 = load ptr, ptr @b25, align 4
+  store i32 %50, ptr %51, align 4
+  %52 = load i32, ptr @a26, align 4
+  %53 = load ptr, ptr @b26, align 4
+  store i32 %52, ptr %53, align 4
+  %54 = load i32, ptr @a27, align 4
+  %55 = load ptr, ptr @b27, align 4
+  store i32 %54, ptr %55, align 4
+  %56 = load i32, ptr @a28, align 4
+  %57 = load ptr, ptr @b28, align 4
+  store i32 %56, ptr %57, align 4
+  %58 = load i32, ptr @a29, align 4
+  %59 = load ptr, ptr @b29, align 4
+  store i32 %58, ptr %59, align 4
+  %60 = load i32, ptr @a0, align 4
+  %61 = load ptr, ptr @c0, align 4
+  store i32 %60, ptr %61, align 4
+  %62 = load i32, ptr @a1, align 4
+  %63 = load ptr, ptr @c1, align 4
+  store i32 %62, ptr %63, align 4
+  %64 = load i32, ptr @a2, align 4
+  %65 = load ptr, ptr @c2, align 4
+  store i32 %64, ptr %65, align 4
+  %66 = load i32, ptr @a3, align 4
+  %67 = load ptr, ptr @c3, align 4
+  store i32 %66, ptr %67, align 4
+  %68 = load i32, ptr @a4, align 4
+  %69 = load ptr, ptr @c4, align 4
+  store i32 %68, ptr %69, align 4
+  %70 = load i32, ptr @a5, align 4
+  %71 = load ptr, ptr @c5, align 4
+  store i32 %70, ptr %71, align 4
+  %72 = load i32, ptr @a6, align 4
+  %73 = load ptr, ptr @c6, align 4
+  store i32 %72, ptr %73, align 4
+  %74 = load i32, ptr @a7, align 4
+  %75 = load ptr, ptr @c7, align 4
+  store i32 %74, ptr %75, align 4
+  %76 = load i32, ptr @a8, align 4
+  %77 = load ptr, ptr @c8, align 4
+  store i32 %76, ptr %77, align 4
+  %78 = load i32, ptr @a9, align 4
+  %79 = load ptr, ptr @c9, align 4
+  store i32 %78, ptr %79, align 4
+  %80 = load i32, ptr @a10, align 4
+  %81 = load ptr, ptr @c10, align 4
+  store i32 %80, ptr %81, align 4
+  %82 = load i32, ptr @a11, align 4
+  %83 = load ptr, ptr @c11, align 4
+  store i32 %82, ptr %83, align 4
+  %84 = load i32, ptr @a12, align 4
+  %85 = load ptr, ptr @c12, align 4
+  store i32 %84, ptr %85, align 4
+  %86 = load i32, ptr @a13, align 4
+  %87 = load ptr, ptr @c13, align 4
+  store i32 %86, ptr %87, align 4
+  %88 = load i32, ptr @a14, align 4
+  %89 = load ptr, ptr @c14, align 4
+  store i32 %88, ptr %89, align 4
+  %90 = load i32, ptr @a15, align 4
+  %91 = load ptr, ptr @c15, align 4
+  store i32 %90, ptr %91, align 4
+  %92 = load i32, ptr @a16, align 4
+  %93 = load ptr, ptr @c16, align 4
+  store i32 %92, ptr %93, align 4
+  %94 = load i32, ptr @a17, align 4
+  %95 = load ptr, ptr @c17, align 4
+  store i32 %94, ptr %95, align 4
+  %96 = load i32, ptr @a18, align 4
+  %97 = load ptr, ptr @c18, align 4
+  store i32 %96, ptr %97, align 4
+  %98 = load i32, ptr @a19, align 4
+  %99 = load ptr, ptr @c19, align 4
+  store i32 %98, ptr %99, align 4
+  %100 = load i32, ptr @a20, align 4
+  %101 = load ptr, ptr @c20, align 4
+  store i32 %100, ptr %101, align 4
+  %102 = load i32, ptr @a21, align 4
+  %103 = load ptr, ptr @c21, align 4
+  store i32 %102, ptr %103, align 4
+  %104 = load i32, ptr @a22, align 4
+  %105 = load ptr, ptr @c22, align 4
+  store i32 %104, ptr %105, align 4
+  %106 = load i32, ptr @a23, align 4
+  %107 = load ptr, ptr @c23, align 4
+  store i32 %106, ptr %107, align 4
+  %108 = load i32, ptr @a24, align 4
+  %109 = load ptr, ptr @c24, align 4
+  store i32 %108, ptr %109, align 4
+  %110 = load i32, ptr @a25, align 4
+  %111 = load ptr, ptr @c25, align 4
+  store i32 %110, ptr %111, align 4
+  %112 = load i32, ptr @a26, align 4
+  %113 = load ptr, ptr @c26, align 4
+  store i32 %112, ptr %113, align 4
+  %114 = load i32, ptr @a27, align 4
+  %115 = load ptr, ptr @c27, align 4
+  store i32 %114, ptr %115, align 4
+  %116 = load i32, ptr @a28, align 4
+  %117 = load ptr, ptr @c28, align 4
+  store i32 %116, ptr %117, align 4
+  %118 = load i32, ptr @a29, align 4
+  %119 = load ptr, ptr @c29, align 4
+  store i32 %118, ptr %119, align 4
+  %120 = load i32, ptr @a0, align 4
   ret i32 %120
 }

diff --git a/llvm/test/CodeGen/Mips/rdhwr-directives.ll b/llvm/test/CodeGen/Mips/rdhwr-directives.ll
index dd5381106f6bd..b2fae4a06f7a6 100644
--- a/llvm/test/CodeGen/Mips/rdhwr-directives.ll
+++ b/llvm/test/CodeGen/Mips/rdhwr-directives.ll
@@ -9,7 +9,7 @@ entry:
 ; CHECK: rdhwr 
 ; CHECK: .set  pop
 
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   ret i32 %0
 }
 

diff --git a/llvm/test/CodeGen/Mips/reloc-jalr.ll b/llvm/test/CodeGen/Mips/reloc-jalr.ll
index 10fc3ac5f5af6..88bbfa7fdfc36 100644
--- a/llvm/test/CodeGen/Mips/reloc-jalr.ll
+++ b/llvm/test/CodeGen/Mips/reloc-jalr.ll
@@ -146,30 +146,30 @@ entry:
 ; Previously we were adding them for local dynamic TLS function pointers and
 ; function pointers with internal linkage.
 
-@fnptr_internal = internal global void()* @checkFunctionPointerCall
-@fnptr_internal_const = internal constant void()* @checkFunctionPointerCall
-@fnptr_const = constant void()* @checkFunctionPointerCall
-@fnptr_global = global void()* @checkFunctionPointerCall
+@fnptr_internal = internal global ptr @checkFunctionPointerCall
+@fnptr_internal_const = internal constant ptr @checkFunctionPointerCall
+@fnptr_const = constant ptr @checkFunctionPointerCall
+@fnptr_global = global ptr @checkFunctionPointerCall
 
 define void @checkFunctionPointerCall() {
 entry:
 ; ALL-LABEL: checkFunctionPointerCall:
 ; ALL-NOT: MIPS_JALR
-  %func_internal = load void()*, void()** @fnptr_internal
+  %func_internal = load ptr, ptr @fnptr_internal
   call void %func_internal()
-  %func_internal_const = load void()*, void()** @fnptr_internal_const
+  %func_internal_const = load ptr, ptr @fnptr_internal_const
   call void %func_internal_const()
-  %func_const = load void()*, void()** @fnptr_const
+  %func_const = load ptr, ptr @fnptr_const
   call void %func_const()
-  %func_global = load void()*, void()** @fnptr_global
+  %func_global = load ptr, ptr @fnptr_global
   call void %func_global()
   ret void
 }
 
-@tls_fnptr_gd = thread_local global void()* @checkTlsFunctionPointerCall
-@tls_fnptr_ld = thread_local(localdynamic) global void()* @checkTlsFunctionPointerCall
-@tls_fnptr_ie = thread_local(initialexec) global void()* @checkTlsFunctionPointerCall
-@tls_fnptr_le = thread_local(localexec) global void()* @checkTlsFunctionPointerCall
+@tls_fnptr_gd = thread_local global ptr @checkTlsFunctionPointerCall
+@tls_fnptr_ld = thread_local(localdynamic) global ptr @checkTlsFunctionPointerCall
+@tls_fnptr_ie = thread_local(initialexec) global ptr @checkTlsFunctionPointerCall
+@tls_fnptr_le = thread_local(localexec) global ptr @checkTlsFunctionPointerCall
 
 define void @checkTlsFunctionPointerCall() {
 entry:
@@ -182,13 +182,13 @@ entry:
 ; JALR-ALL: .reloc {{.+}}MIPS_JALR, __tls_get_addr
 ; NORELOC-NOT:   .reloc
 ; ALL-NOT: _MIPS_JALR
-  %func_gd = load void()*, void()** @tls_fnptr_gd
+  %func_gd = load ptr, ptr @tls_fnptr_gd
   call void %func_gd()
-  %func_ld = load void()*, void()** @tls_fnptr_ld
+  %func_ld = load ptr, ptr @tls_fnptr_ld
   call void %func_ld()
-  %func_ie = load void()*, void()** @tls_fnptr_ie
+  %func_ie = load ptr, ptr @tls_fnptr_ie
   call void %func_ie()
-  %func_le = load void()*, void()** @tls_fnptr_le
+  %func_le = load ptr, ptr @tls_fnptr_le
   call void %func_le()
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/rem.ll b/llvm/test/CodeGen/Mips/rem.ll
index ef16483f39d3c..525ab633ca019 100644
--- a/llvm/test/CodeGen/Mips/rem.ll
+++ b/llvm/test/CodeGen/Mips/rem.ll
@@ -7,12 +7,12 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @iiii, align 4
-  %1 = load i32, i32* @jjjj, align 4
+  %0 = load i32, ptr @iiii, align 4
+  %1 = load i32, ptr @jjjj, align 4
   %rem = srem i32 %0, %1
 ; 16:	div	$zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mfhi	${{[0-9]+}}
-  store i32 %rem, i32* @kkkk, align 4
+  store i32 %rem, ptr @kkkk, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/remu.ll b/llvm/test/CodeGen/Mips/remu.ll
index dac4b05cd00a1..3d5f5d7629d95 100644
--- a/llvm/test/CodeGen/Mips/remu.ll
+++ b/llvm/test/CodeGen/Mips/remu.ll
@@ -7,12 +7,12 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @iiii, align 4
-  %1 = load i32, i32* @jjjj, align 4
+  %0 = load i32, ptr @iiii, align 4
+  %1 = load i32, ptr @jjjj, align 4
   %rem = urem i32 %0, %1
 ; 16:	divu	$zero, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: 	mfhi	${{[0-9]+}}
-  store i32 %rem, i32* @kkkk, align 4
+  store i32 %rem, ptr @kkkk, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/return_address.ll b/llvm/test/CodeGen/Mips/return_address.ll
index 54a106f4b349e..65b4ff5733f43 100644
--- a/llvm/test/CodeGen/Mips/return_address.ll
+++ b/llvm/test/CodeGen/Mips/return_address.ll
@@ -1,23 +1,23 @@
 ; RUN: llc -march=mipsel -verify-machineinstrs < %s | FileCheck %s
 
-define i8* @f1() nounwind {
+define ptr @f1() nounwind {
 entry:
-  %0 = call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 
 ; CHECK:    move  $2, $ra
 }
 
-define i8* @f2() nounwind {
+define ptr @f2() nounwind {
 entry:
   call void @g()
-  %0 = call i8* @llvm.returnaddress(i32 0)
-  ret i8* %0
+  %0 = call ptr @llvm.returnaddress(i32 0)
+  ret ptr %0
 
 ; CHECK:    move  $[[R0:[0-9]+]], $ra
 ; CHECK:    jal
 ; CHECK:    move  $2, $[[R0]]
 }
 
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone
 declare void @g()

diff --git a/llvm/test/CodeGen/Mips/return_address_err.ll b/llvm/test/CodeGen/Mips/return_address_err.ll
index b2507656aa1f3..bf2ab3c164886 100644
--- a/llvm/test/CodeGen/Mips/return_address_err.ll
+++ b/llvm/test/CodeGen/Mips/return_address_err.ll
@@ -1,11 +1,11 @@
 ; RUN: not llc -march=mips < %s 2>&1 | FileCheck %s
 
-declare i8* @llvm.returnaddress(i32) nounwind readnone
+declare ptr @llvm.returnaddress(i32) nounwind readnone
 
-define i8* @f() nounwind {
+define ptr @f() nounwind {
 entry:
-  %0 = call i8* @llvm.returnaddress(i32 1)
-  ret i8* %0
+  %0 = call ptr @llvm.returnaddress(i32 1)
+  ret ptr %0
 
 ; CHECK: error: return address can be determined only for current frame
 }

diff --git a/llvm/test/CodeGen/Mips/s2rem.ll b/llvm/test/CodeGen/Mips/s2rem.ll
index 9125632556a20..92bb5ce51f31b 100644
--- a/llvm/test/CodeGen/Mips/s2rem.ll
+++ b/llvm/test/CodeGen/Mips/s2rem.ll
@@ -11,7 +11,7 @@
 define void @it() #0 {
 entry:
   %call = call i32 @i(i32 1)
-  store i32 %call, i32* @xi, align 4
+  store i32 %call, ptr @xi, align 4
   ret void
 ; PIC: 	.ent	it
 ; STATIC: 	.ent	it
@@ -29,7 +29,7 @@ declare i32 @i(i32) #1
 define void @ft() #0 {
 entry:
   %call = call float @f()
-  store float %call, float* @x, align 4
+  store float %call, ptr @x, align 4
   ret void
 ; PIC: 	.ent	ft
 ; PIC: 	save	$16, $17, $ra, $18, [[FS:[0-9]+]]
@@ -43,7 +43,7 @@ declare float @f() #1
 define void @dt() #0 {
 entry:
   %call = call double @d()
-  store double %call, double* @xd, align 8
+  store double %call, ptr @xd, align 8
   ret void
 ; PIC: 	.ent	dt
 ; PIC: 	save	$16, $17, $ra, $18, [[FS:[0-9]+]]
@@ -56,9 +56,9 @@ declare double @d() #1
 ; Function Attrs: nounwind
 define void @fft() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   %call = call float @ff(float %0)
-  store float %call, float* @x, align 4
+  store float %call, ptr @x, align 4
   ret void
 ; PIC: 	.ent	fft
 ; PIC: 	save	$16, $17, $ra, $18, [[FS:[0-9]+]]
@@ -71,7 +71,7 @@ declare float @ff(float) #1
 ; Function Attrs: nounwind
 define void @vft() #0 {
 entry:
-  %0 = load float, float* @x, align 4
+  %0 = load float, ptr @x, align 4
   call void @vf(float %0)
   ret void
 ; PIC: 	.ent	vft

diff --git a/llvm/test/CodeGen/Mips/sb1.ll b/llvm/test/CodeGen/Mips/sb1.ll
index 4724a7f2cfd73..8b79f39d2c70f 100644
--- a/llvm/test/CodeGen/Mips/sb1.ll
+++ b/llvm/test/CodeGen/Mips/sb1.ll
@@ -6,15 +6,15 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %conv = trunc i32 %0 to i8
-  store i8 %conv, i8* @c, align 1
-  %1 = load i32, i32* @i, align 4
-  %2 = load i8, i8* @c, align 1
+  store i8 %conv, ptr @c, align 1
+  %1 = load i32, ptr @i, align 4
+  %2 = load i8, ptr @c, align 1
   %conv1 = sext i8 %2 to i32
 ; 16:	sb	${{[0-9]+}}, 0(${{[0-9]+}})
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @.str, i32 0, i32 0), i32 %1, i32 %conv1)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1, i32 %conv1)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff --git a/llvm/test/CodeGen/Mips/sel1c.ll b/llvm/test/CodeGen/Mips/sel1c.ll
index 7013f281350ff..b4cba37b78ec9 100644
--- a/llvm/test/CodeGen/Mips/sel1c.ll
+++ b/llvm/test/CodeGen/Mips/sel1c.ll
@@ -7,11 +7,11 @@
 ; Function Attrs: nounwind optsize
 define void @t() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @j, align 4
   %cmp = icmp eq i32 %0, %1
   %cond = select i1 %cmp, i32 1, i32 3
-  store i32 %cond, i32* @k, align 4
+  store i32 %cond, ptr @k, align 4
   ret void
 ; cond-b-short:	bteqz	$BB0_{{[0-9]+}}  # 16 bit inst
 }

diff --git a/llvm/test/CodeGen/Mips/sel2c.ll b/llvm/test/CodeGen/Mips/sel2c.ll
index 73f9e1e91e087..9cf8b20eecdd5 100644
--- a/llvm/test/CodeGen/Mips/sel2c.ll
+++ b/llvm/test/CodeGen/Mips/sel2c.ll
@@ -7,11 +7,11 @@
 ; Function Attrs: nounwind optsize
 define void @t() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @j, align 4
   %cmp = icmp ne i32 %0, %1
   %cond = select i1 %cmp, i32 1, i32 3
-  store i32 %cond, i32* @k, align 4
+  store i32 %cond, ptr @k, align 4
 ; cond-b-short:	btnez	$BB0_{{[0-9]+}}  # 16 bit inst
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/selTBteqzCmpi.ll b/llvm/test/CodeGen/Mips/selTBteqzCmpi.ll
index 97eba29e99fbf..a81393b0b0807 100644
--- a/llvm/test/CodeGen/Mips/selTBteqzCmpi.ll
+++ b/llvm/test/CodeGen/Mips/selTBteqzCmpi.ll
@@ -8,12 +8,12 @@
 
 define void @t() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp eq i32 %0, 10
-  %1 = load i32, i32* @i, align 4
-  %2 = load i32, i32* @j, align 4
+  %1 = load i32, ptr @i, align 4
+  %2 = load i32, ptr @j, align 4
   %cond = select i1 %cmp, i32 %1, i32 %2
-  store i32 %cond, i32* @i, align 4
+  store i32 %cond, ptr @i, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/selTBtnezCmpi.ll b/llvm/test/CodeGen/Mips/selTBtnezCmpi.ll
index 62af3dffb7b3e..e703e317a0fb8 100644
--- a/llvm/test/CodeGen/Mips/selTBtnezCmpi.ll
+++ b/llvm/test/CodeGen/Mips/selTBtnezCmpi.ll
@@ -8,12 +8,12 @@
 
 define void @t() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp ne i32 %0, 10
-  %1 = load i32, i32* @i, align 4
-  %2 = load i32, i32* @j, align 4
+  %1 = load i32, ptr @i, align 4
+  %2 = load i32, ptr @j, align 4
   %cond = select i1 %cmp, i32 %1, i32 %2
-  store i32 %cond, i32* @i, align 4
+  store i32 %cond, ptr @i, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/selTBtnezSlti.ll b/llvm/test/CodeGen/Mips/selTBtnezSlti.ll
index 3851fdf093e44..132d5ed770207 100644
--- a/llvm/test/CodeGen/Mips/selTBtnezSlti.ll
+++ b/llvm/test/CodeGen/Mips/selTBtnezSlti.ll
@@ -8,12 +8,12 @@
 
 define void @t() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp slt i32 %0, 10
-  %1 = load i32, i32* @j, align 4
-  %2 = load i32, i32* @i, align 4
+  %1 = load i32, ptr @j, align 4
+  %2 = load i32, ptr @i, align 4
   %cond = select i1 %cmp, i32 %1, i32 %2
-  store i32 %cond, i32* @i, align 4
+  store i32 %cond, ptr @i, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/select.ll b/llvm/test/CodeGen/Mips/select.ll
index a908480d2eaa0..12067e1b3d82c 100644
--- a/llvm/test/CodeGen/Mips/select.ll
+++ b/llvm/test/CodeGen/Mips/select.ll
@@ -984,8 +984,8 @@ define i32 @f64_fcmp_oeq_i32_val(i32 signext %f0, i32 signext %f1) nounwind read
 ; 64R6-NEXT:    jr $ra
 ; 64R6-NEXT:    or $2, $1, $2
 entry:
-  %tmp = load double, double* @d2, align 8
-  %tmp1 = load double, double* @d3, align 8
+  %tmp = load double, ptr @d2, align 8
+  %tmp1 = load double, ptr @d3, align 8
   %cmp = fcmp oeq double %tmp, %tmp1
   %cond = select i1 %cmp, i32 %f0, i32 %f1
   ret i32 %cond
@@ -1082,8 +1082,8 @@ define i32 @f64_fcmp_olt_i32_val(i32 signext %f0, i32 signext %f1) nounwind read
 ; 64R6-NEXT:    jr $ra
 ; 64R6-NEXT:    or $2, $1, $2
 entry:
-  %tmp = load double, double* @d2, align 8
-  %tmp1 = load double, double* @d3, align 8
+  %tmp = load double, ptr @d2, align 8
+  %tmp1 = load double, ptr @d3, align 8
   %cmp = fcmp olt double %tmp, %tmp1
   %cond = select i1 %cmp, i32 %f0, i32 %f1
   ret i32 %cond
@@ -1180,8 +1180,8 @@ define i32 @f64_fcmp_ogt_i32_val(i32 signext %f0, i32 signext %f1) nounwind read
 ; 64R6-NEXT:    jr $ra
 ; 64R6-NEXT:    or $2, $1, $2
 entry:
-  %tmp = load double, double* @d2, align 8
-  %tmp1 = load double, double* @d3, align 8
+  %tmp = load double, ptr @d2, align 8
+  %tmp1 = load double, ptr @d3, align 8
   %cmp = fcmp ogt double %tmp, %tmp1
   %cond = select i1 %cmp, i32 %f0, i32 %f1
   ret i32 %cond

diff --git a/llvm/test/CodeGen/Mips/selectcc.ll b/llvm/test/CodeGen/Mips/selectcc.ll
index 865e4b38acad6..1bec29e8bcd5f 100644
--- a/llvm/test/CodeGen/Mips/selectcc.ll
+++ b/llvm/test/CodeGen/Mips/selectcc.ll
@@ -23,8 +23,8 @@ entry:
 ; SOURCE-SCHED: mtc1
 ; SOURCE-SCHED: c.olt.s
 ; SOURCE-SCHED: jr
-  store float 0.000000e+00, float* @gf0, align 4
-  store float 1.000000e+00, float* @gf1, align 4
+  store float 0.000000e+00, ptr @gf0, align 4
+  store float 1.000000e+00, ptr @gf1, align 4
   %cmp = fcmp olt float %a, %b
   %conv = zext i1 %cmp to i32
   %conv1 = sitofp i32 %conv to float
@@ -33,8 +33,8 @@ entry:
 
 define double @select_cc_f64(double %a, double %b) nounwind {
 entry:
-  store double 0.000000e+00, double* @gd0, align 8
-  store double 1.000000e+00, double* @gd1, align 8
+  store double 0.000000e+00, ptr @gd0, align 8
+  store double 1.000000e+00, ptr @gd1, align 8
   %cmp = fcmp olt double %a, %b
   %conv = zext i1 %cmp to i32
   %conv1 = sitofp i32 %conv to double

diff --git a/llvm/test/CodeGen/Mips/selectiondag-optlevel.ll b/llvm/test/CodeGen/Mips/selectiondag-optlevel.ll
index 9993611317457..9632eac84419c 100644
--- a/llvm/test/CodeGen/Mips/selectiondag-optlevel.ll
+++ b/llvm/test/CodeGen/Mips/selectiondag-optlevel.ll
@@ -7,10 +7,9 @@
 define void @foo() nounwind {
 entry:
   %0 = alloca [2 x i8], align 32
-  %1 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 0
-  store i8 1, i8* %1
-  %2 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 1
-  store i8 1, i8* %2
+  store i8 1, ptr %0
+  %1 = getelementptr inbounds [2 x i8], ptr %0, i32 0, i32 1
+  store i8 1, ptr %1
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/seleq.ll b/llvm/test/CodeGen/Mips/seleq.ll
index 7d1e034d68c7c..ecbeb2b51e3d7 100644
--- a/llvm/test/CodeGen/Mips/seleq.ll
+++ b/llvm/test/CodeGen/Mips/seleq.ll
@@ -12,70 +12,70 @@
 
 define void @calc_seleq() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %cond.true, label %cond.false
 
 cond.true:                                        ; preds = %entry
-  %2 = load i32, i32* @f, align 4
+  %2 = load i32, ptr @f, align 4
   br label %cond.end
 
 cond.false:                                       ; preds = %entry
-  %3 = load i32, i32* @t, align 4
+  %3 = load i32, ptr @t, align 4
   br label %cond.end
 
 cond.end:                                         ; preds = %cond.false, %cond.true
   %cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ]
-  store i32 %cond, i32* @z1, align 4
-  %4 = load i32, i32* @b, align 4
-  %5 = load i32, i32* @a, align 4
+  store i32 %cond, ptr @z1, align 4
+  %4 = load i32, ptr @b, align 4
+  %5 = load i32, ptr @a, align 4
   %cmp1 = icmp eq i32 %4, %5
   br i1 %cmp1, label %cond.true2, label %cond.false3
 
 cond.true2:                                       ; preds = %cond.end
-  %6 = load i32, i32* @f, align 4
+  %6 = load i32, ptr @f, align 4
   br label %cond.end4
 
 cond.false3:                                      ; preds = %cond.end
-  %7 = load i32, i32* @t, align 4
+  %7 = load i32, ptr @t, align 4
   br label %cond.end4
 
 cond.end4:                                        ; preds = %cond.false3, %cond.true2
   %cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ]
-  store i32 %cond5, i32* @z2, align 4
-  %8 = load i32, i32* @c, align 4
-  %9 = load i32, i32* @a, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %8 = load i32, ptr @c, align 4
+  %9 = load i32, ptr @a, align 4
   %cmp6 = icmp eq i32 %8, %9
   br i1 %cmp6, label %cond.true7, label %cond.false8
 
 cond.true7:                                       ; preds = %cond.end4
-  %10 = load i32, i32* @t, align 4
+  %10 = load i32, ptr @t, align 4
   br label %cond.end9
 
 cond.false8:                                      ; preds = %cond.end4
-  %11 = load i32, i32* @f, align 4
+  %11 = load i32, ptr @f, align 4
   br label %cond.end9
 
 cond.end9:                                        ; preds = %cond.false8, %cond.true7
   %cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ]
-  store i32 %cond10, i32* @z3, align 4
-  %12 = load i32, i32* @a, align 4
-  %13 = load i32, i32* @c, align 4
+  store i32 %cond10, ptr @z3, align 4
+  %12 = load i32, ptr @a, align 4
+  %13 = load i32, ptr @c, align 4
   %cmp11 = icmp eq i32 %12, %13
   br i1 %cmp11, label %cond.true12, label %cond.false13
 
 cond.true12:                                      ; preds = %cond.end9
-  %14 = load i32, i32* @t, align 4
+  %14 = load i32, ptr @t, align 4
   br label %cond.end14
 
 cond.false13:                                     ; preds = %cond.end9
-  %15 = load i32, i32* @f, align 4
+  %15 = load i32, ptr @f, align 4
   br label %cond.end14
 
 cond.end14:                                       ; preds = %cond.false13, %cond.true12
   %cond15 = phi i32 [ %14, %cond.true12 ], [ %15, %cond.false13 ]
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/seleqk.ll b/llvm/test/CodeGen/Mips/seleqk.ll
index a0bfe44eadd65..911c6f1996b67 100644
--- a/llvm/test/CodeGen/Mips/seleqk.ll
+++ b/llvm/test/CodeGen/Mips/seleqk.ll
@@ -12,66 +12,66 @@
 
 define void @calc_seleqk() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp eq i32 %0, 1
   br i1 %cmp, label %cond.true, label %cond.false
 
 cond.true:                                        ; preds = %entry
-  %1 = load i32, i32* @t, align 4
+  %1 = load i32, ptr @t, align 4
   br label %cond.end
 
 cond.false:                                       ; preds = %entry
-  %2 = load i32, i32* @f, align 4
+  %2 = load i32, ptr @f, align 4
   br label %cond.end
 
 cond.end:                                         ; preds = %cond.false, %cond.true
   %cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ]
-  store i32 %cond, i32* @z1, align 4
-  %3 = load i32, i32* @a, align 4
+  store i32 %cond, ptr @z1, align 4
+  %3 = load i32, ptr @a, align 4
   %cmp1 = icmp eq i32 %3, 1000
   br i1 %cmp1, label %cond.true2, label %cond.false3
 
 cond.true2:                                       ; preds = %cond.end
-  %4 = load i32, i32* @f, align 4
+  %4 = load i32, ptr @f, align 4
   br label %cond.end4
 
 cond.false3:                                      ; preds = %cond.end
-  %5 = load i32, i32* @t, align 4
+  %5 = load i32, ptr @t, align 4
   br label %cond.end4
 
 cond.end4:                                        ; preds = %cond.false3, %cond.true2
   %cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ]
-  store i32 %cond5, i32* @z2, align 4
-  %6 = load i32, i32* @b, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %6 = load i32, ptr @b, align 4
   %cmp6 = icmp eq i32 %6, 3
   br i1 %cmp6, label %cond.true7, label %cond.false8
 
 cond.true7:                                       ; preds = %cond.end4
-  %7 = load i32, i32* @f, align 4
+  %7 = load i32, ptr @f, align 4
   br label %cond.end9
 
 cond.false8:                                      ; preds = %cond.end4
-  %8 = load i32, i32* @t, align 4
+  %8 = load i32, ptr @t, align 4
   br label %cond.end9
 
 cond.end9:                                        ; preds = %cond.false8, %cond.true7
   %cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ]
-  store i32 %cond10, i32* @z3, align 4
-  %9 = load i32, i32* @b, align 4
+  store i32 %cond10, ptr @z3, align 4
+  %9 = load i32, ptr @b, align 4
   %cmp11 = icmp eq i32 %9, 1000
   br i1 %cmp11, label %cond.true12, label %cond.false13
 
 cond.true12:                                      ; preds = %cond.end9
-  %10 = load i32, i32* @t, align 4
+  %10 = load i32, ptr @t, align 4
   br label %cond.end14
 
 cond.false13:                                     ; preds = %cond.end9
-  %11 = load i32, i32* @f, align 4
+  %11 = load i32, ptr @f, align 4
   br label %cond.end14
 
 cond.end14:                                       ; preds = %cond.false13, %cond.true12
   %cond15 = phi i32 [ %10, %cond.true12 ], [ %11, %cond.false13 ]
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/selgek.ll b/llvm/test/CodeGen/Mips/selgek.ll
index 9d9df743db9b9..a909bb543538b 100644
--- a/llvm/test/CodeGen/Mips/selgek.ll
+++ b/llvm/test/CodeGen/Mips/selgek.ll
@@ -13,66 +13,66 @@
 
 define void @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp sge i32 %0, 1000
   br i1 %cmp, label %cond.true, label %cond.false
 
 cond.true:                                        ; preds = %entry
-  %1 = load i32, i32* @f, align 4
+  %1 = load i32, ptr @f, align 4
   br label %cond.end
 
 cond.false:                                       ; preds = %entry
-  %2 = load i32, i32* @t, align 4
+  %2 = load i32, ptr @t, align 4
   br label %cond.end
 
 cond.end:                                         ; preds = %cond.false, %cond.true
   %cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ]
-  store i32 %cond, i32* @z1, align 4
-  %3 = load i32, i32* @b, align 4
+  store i32 %cond, ptr @z1, align 4
+  %3 = load i32, ptr @b, align 4
   %cmp1 = icmp sge i32 %3, 1
   br i1 %cmp1, label %cond.true2, label %cond.false3
 
 cond.true2:                                       ; preds = %cond.end
-  %4 = load i32, i32* @t, align 4
+  %4 = load i32, ptr @t, align 4
   br label %cond.end4
 
 cond.false3:                                      ; preds = %cond.end
-  %5 = load i32, i32* @f, align 4
+  %5 = load i32, ptr @f, align 4
   br label %cond.end4
 
 cond.end4:                                        ; preds = %cond.false3, %cond.true2
   %cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ]
-  store i32 %cond5, i32* @z2, align 4
-  %6 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %6 = load i32, ptr @c, align 4
   %cmp6 = icmp sge i32 %6, 2
   br i1 %cmp6, label %cond.true7, label %cond.false8
 
 cond.true7:                                       ; preds = %cond.end4
-  %7 = load i32, i32* @t, align 4
+  %7 = load i32, ptr @t, align 4
   br label %cond.end9
 
 cond.false8:                                      ; preds = %cond.end4
-  %8 = load i32, i32* @f, align 4
+  %8 = load i32, ptr @f, align 4
   br label %cond.end9
 
 cond.end9:                                        ; preds = %cond.false8, %cond.true7
   %cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ]
-  store i32 %cond10, i32* @z3, align 4
-  %9 = load i32, i32* @a, align 4
+  store i32 %cond10, ptr @z3, align 4
+  %9 = load i32, ptr @a, align 4
   %cmp11 = icmp sge i32 %9, 2
   br i1 %cmp11, label %cond.true12, label %cond.false13
 
 cond.true12:                                      ; preds = %cond.end9
-  %10 = load i32, i32* @t, align 4
+  %10 = load i32, ptr @t, align 4
   br label %cond.end14
 
 cond.false13:                                     ; preds = %cond.end9
-  %11 = load i32, i32* @f, align 4
+  %11 = load i32, ptr @f, align 4
   br label %cond.end14
 
 cond.end14:                                       ; preds = %cond.false13, %cond.true12
   %cond15 = phi i32 [ %10, %cond.true12 ], [ %11, %cond.false13 ]
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/selgt.ll b/llvm/test/CodeGen/Mips/selgt.ll
index 94f0f9b50af1c..30d7f8ff3a695 100644
--- a/llvm/test/CodeGen/Mips/selgt.ll
+++ b/llvm/test/CodeGen/Mips/selgt.ll
@@ -14,71 +14,71 @@
 define i32 @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
   %retval = alloca i32, align 4
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp sgt i32 %0, %1
   br i1 %cmp, label %cond.true, label %cond.false
 
 cond.true:                                        ; preds = %entry
-  %2 = load i32, i32* @f, align 4
+  %2 = load i32, ptr @f, align 4
   br label %cond.end
 
 cond.false:                                       ; preds = %entry
-  %3 = load i32, i32* @t, align 4
+  %3 = load i32, ptr @t, align 4
   br label %cond.end
 
 cond.end:                                         ; preds = %cond.false, %cond.true
   %cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ]
-  store i32 %cond, i32* @z1, align 4
-  %4 = load i32, i32* @b, align 4
-  %5 = load i32, i32* @a, align 4
+  store i32 %cond, ptr @z1, align 4
+  %4 = load i32, ptr @b, align 4
+  %5 = load i32, ptr @a, align 4
   %cmp1 = icmp sgt i32 %4, %5
   br i1 %cmp1, label %cond.true2, label %cond.false3
 
 cond.true2:                                       ; preds = %cond.end
-  %6 = load i32, i32* @t, align 4
+  %6 = load i32, ptr @t, align 4
   br label %cond.end4
 
 cond.false3:                                      ; preds = %cond.end
-  %7 = load i32, i32* @f, align 4
+  %7 = load i32, ptr @f, align 4
   br label %cond.end4
 
 cond.end4:                                        ; preds = %cond.false3, %cond.true2
   %cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ]
-  store i32 %cond5, i32* @z2, align 4
-  %8 = load i32, i32* @c, align 4
-  %9 = load i32, i32* @a, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %8 = load i32, ptr @c, align 4
+  %9 = load i32, ptr @a, align 4
   %cmp6 = icmp sgt i32 %8, %9
   br i1 %cmp6, label %cond.true7, label %cond.false8
 
 cond.true7:                                       ; preds = %cond.end4
-  %10 = load i32, i32* @f, align 4
+  %10 = load i32, ptr @f, align 4
   br label %cond.end9
 
 cond.false8:                                      ; preds = %cond.end4
-  %11 = load i32, i32* @t, align 4
+  %11 = load i32, ptr @t, align 4
   br label %cond.end9
 
 cond.end9:                                        ; preds = %cond.false8, %cond.true7
   %cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ]
-  store i32 %cond10, i32* @z3, align 4
-  %12 = load i32, i32* @a, align 4
-  %13 = load i32, i32* @c, align 4
+  store i32 %cond10, ptr @z3, align 4
+  %12 = load i32, ptr @a, align 4
+  %13 = load i32, ptr @c, align 4
   %cmp11 = icmp sgt i32 %12, %13
   br i1 %cmp11, label %cond.true12, label %cond.false13
 
 cond.true12:                                      ; preds = %cond.end9
-  %14 = load i32, i32* @f, align 4
+  %14 = load i32, ptr @f, align 4
   br label %cond.end14
 
 cond.false13:                                     ; preds = %cond.end9
-  %15 = load i32, i32* @t, align 4
+  %15 = load i32, ptr @t, align 4
   br label %cond.end14
 
 cond.end14:                                       ; preds = %cond.false13, %cond.true12
   %cond15 = phi i32 [ %14, %cond.true12 ], [ %15, %cond.false13 ]
-  store i32 %cond15, i32* @z4, align 4
-  %16 = load i32, i32* %retval
+  store i32 %cond15, ptr @z4, align 4
+  %16 = load i32, ptr %retval
   ret i32 %16
 }
 

diff --git a/llvm/test/CodeGen/Mips/selle.ll b/llvm/test/CodeGen/Mips/selle.ll
index 8925aac10c4d1..bccc3de56705e 100644
--- a/llvm/test/CodeGen/Mips/selle.ll
+++ b/llvm/test/CodeGen/Mips/selle.ll
@@ -13,70 +13,70 @@
 
 define void @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp sle i32 %0, %1
   br i1 %cmp, label %cond.true, label %cond.false
 
 cond.true:                                        ; preds = %entry
-  %2 = load i32, i32* @t, align 4
+  %2 = load i32, ptr @t, align 4
   br label %cond.end
 
 cond.false:                                       ; preds = %entry
-  %3 = load i32, i32* @f, align 4
+  %3 = load i32, ptr @f, align 4
   br label %cond.end
 
 cond.end:                                         ; preds = %cond.false, %cond.true
   %cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ]
-  store i32 %cond, i32* @z1, align 4
-  %4 = load i32, i32* @b, align 4
-  %5 = load i32, i32* @a, align 4
+  store i32 %cond, ptr @z1, align 4
+  %4 = load i32, ptr @b, align 4
+  %5 = load i32, ptr @a, align 4
   %cmp1 = icmp sle i32 %4, %5
   br i1 %cmp1, label %cond.true2, label %cond.false3
 
 cond.true2:                                       ; preds = %cond.end
-  %6 = load i32, i32* @f, align 4
+  %6 = load i32, ptr @f, align 4
   br label %cond.end4
 
 cond.false3:                                      ; preds = %cond.end
-  %7 = load i32, i32* @t, align 4
+  %7 = load i32, ptr @t, align 4
   br label %cond.end4
 
 cond.end4:                                        ; preds = %cond.false3, %cond.true2
   %cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ]
-  store i32 %cond5, i32* @z2, align 4
-  %8 = load i32, i32* @c, align 4
-  %9 = load i32, i32* @a, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %8 = load i32, ptr @c, align 4
+  %9 = load i32, ptr @a, align 4
   %cmp6 = icmp sle i32 %8, %9
   br i1 %cmp6, label %cond.true7, label %cond.false8
 
 cond.true7:                                       ; preds = %cond.end4
-  %10 = load i32, i32* @t, align 4
+  %10 = load i32, ptr @t, align 4
   br label %cond.end9
 
 cond.false8:                                      ; preds = %cond.end4
-  %11 = load i32, i32* @f, align 4
+  %11 = load i32, ptr @f, align 4
   br label %cond.end9
 
 cond.end9:                                        ; preds = %cond.false8, %cond.true7
   %cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ]
-  store i32 %cond10, i32* @z3, align 4
-  %12 = load i32, i32* @a, align 4
-  %13 = load i32, i32* @c, align 4
+  store i32 %cond10, ptr @z3, align 4
+  %12 = load i32, ptr @a, align 4
+  %13 = load i32, ptr @c, align 4
   %cmp11 = icmp sle i32 %12, %13
   br i1 %cmp11, label %cond.true12, label %cond.false13
 
 cond.true12:                                      ; preds = %cond.end9
-  %14 = load i32, i32* @t, align 4
+  %14 = load i32, ptr @t, align 4
   br label %cond.end14
 
 cond.false13:                                     ; preds = %cond.end9
-  %15 = load i32, i32* @f, align 4
+  %15 = load i32, ptr @f, align 4
   br label %cond.end14
 
 cond.end14:                                       ; preds = %cond.false13, %cond.true12
   %cond15 = phi i32 [ %14, %cond.true12 ], [ %15, %cond.false13 ]
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/selltk.ll b/llvm/test/CodeGen/Mips/selltk.ll
index 106fe9b85d602..b070c301b0199 100644
--- a/llvm/test/CodeGen/Mips/selltk.ll
+++ b/llvm/test/CodeGen/Mips/selltk.ll
@@ -13,66 +13,66 @@
 
 define void @calc_selltk() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp slt i32 %0, 1000
   br i1 %cmp, label %cond.true, label %cond.false
 
 cond.true:                                        ; preds = %entry
-  %1 = load i32, i32* @t, align 4
+  %1 = load i32, ptr @t, align 4
   br label %cond.end
 
 cond.false:                                       ; preds = %entry
-  %2 = load i32, i32* @f, align 4
+  %2 = load i32, ptr @f, align 4
   br label %cond.end
 
 cond.end:                                         ; preds = %cond.false, %cond.true
   %cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ]
-  store i32 %cond, i32* @z1, align 4
-  %3 = load i32, i32* @b, align 4
+  store i32 %cond, ptr @z1, align 4
+  %3 = load i32, ptr @b, align 4
   %cmp1 = icmp slt i32 %3, 2
   br i1 %cmp1, label %cond.true2, label %cond.false3
 
 cond.true2:                                       ; preds = %cond.end
-  %4 = load i32, i32* @f, align 4
+  %4 = load i32, ptr @f, align 4
   br label %cond.end4
 
 cond.false3:                                      ; preds = %cond.end
-  %5 = load i32, i32* @t, align 4
+  %5 = load i32, ptr @t, align 4
   br label %cond.end4
 
 cond.end4:                                        ; preds = %cond.false3, %cond.true2
   %cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ]
-  store i32 %cond5, i32* @z2, align 4
-  %6 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %6 = load i32, ptr @c, align 4
   %cmp6 = icmp sgt i32 %6, 2
   br i1 %cmp6, label %cond.true7, label %cond.false8
 
 cond.true7:                                       ; preds = %cond.end4
-  %7 = load i32, i32* @f, align 4
+  %7 = load i32, ptr @f, align 4
   br label %cond.end9
 
 cond.false8:                                      ; preds = %cond.end4
-  %8 = load i32, i32* @t, align 4
+  %8 = load i32, ptr @t, align 4
   br label %cond.end9
 
 cond.end9:                                        ; preds = %cond.false8, %cond.true7
   %cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ]
-  store i32 %cond10, i32* @z3, align 4
-  %9 = load i32, i32* @a, align 4
+  store i32 %cond10, ptr @z3, align 4
+  %9 = load i32, ptr @a, align 4
   %cmp11 = icmp sgt i32 %9, 2
   br i1 %cmp11, label %cond.true12, label %cond.false13
 
 cond.true12:                                      ; preds = %cond.end9
-  %10 = load i32, i32* @f, align 4
+  %10 = load i32, ptr @f, align 4
   br label %cond.end14
 
 cond.false13:                                     ; preds = %cond.end9
-  %11 = load i32, i32* @t, align 4
+  %11 = load i32, ptr @t, align 4
   br label %cond.end14
 
 cond.end14:                                       ; preds = %cond.false13, %cond.true12
   %cond15 = phi i32 [ %10, %cond.true12 ], [ %11, %cond.false13 ]
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/selne.ll b/llvm/test/CodeGen/Mips/selne.ll
index 270c0dadd8640..6fe9e48279877 100644
--- a/llvm/test/CodeGen/Mips/selne.ll
+++ b/llvm/test/CodeGen/Mips/selne.ll
@@ -13,70 +13,70 @@
 
 define void @calc_seleq() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp ne i32 %0, %1
   br i1 %cmp, label %cond.true, label %cond.false
 
 cond.true:                                        ; preds = %entry
-  %2 = load i32, i32* @f, align 4
+  %2 = load i32, ptr @f, align 4
   br label %cond.end
 
 cond.false:                                       ; preds = %entry
-  %3 = load i32, i32* @t, align 4
+  %3 = load i32, ptr @t, align 4
   br label %cond.end
 
 cond.end:                                         ; preds = %cond.false, %cond.true
   %cond = phi i32 [ %2, %cond.true ], [ %3, %cond.false ]
-  store i32 %cond, i32* @z1, align 4
-  %4 = load i32, i32* @b, align 4
-  %5 = load i32, i32* @a, align 4
+  store i32 %cond, ptr @z1, align 4
+  %4 = load i32, ptr @b, align 4
+  %5 = load i32, ptr @a, align 4
   %cmp1 = icmp ne i32 %4, %5
   br i1 %cmp1, label %cond.true2, label %cond.false3
 
 cond.true2:                                       ; preds = %cond.end
-  %6 = load i32, i32* @f, align 4
+  %6 = load i32, ptr @f, align 4
   br label %cond.end4
 
 cond.false3:                                      ; preds = %cond.end
-  %7 = load i32, i32* @t, align 4
+  %7 = load i32, ptr @t, align 4
   br label %cond.end4
 
 cond.end4:                                        ; preds = %cond.false3, %cond.true2
   %cond5 = phi i32 [ %6, %cond.true2 ], [ %7, %cond.false3 ]
-  store i32 %cond5, i32* @z2, align 4
-  %8 = load i32, i32* @c, align 4
-  %9 = load i32, i32* @a, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %8 = load i32, ptr @c, align 4
+  %9 = load i32, ptr @a, align 4
   %cmp6 = icmp ne i32 %8, %9
   br i1 %cmp6, label %cond.true7, label %cond.false8
 
 cond.true7:                                       ; preds = %cond.end4
-  %10 = load i32, i32* @t, align 4
+  %10 = load i32, ptr @t, align 4
   br label %cond.end9
 
 cond.false8:                                      ; preds = %cond.end4
-  %11 = load i32, i32* @f, align 4
+  %11 = load i32, ptr @f, align 4
   br label %cond.end9
 
 cond.end9:                                        ; preds = %cond.false8, %cond.true7
   %cond10 = phi i32 [ %10, %cond.true7 ], [ %11, %cond.false8 ]
-  store i32 %cond10, i32* @z3, align 4
-  %12 = load i32, i32* @a, align 4
-  %13 = load i32, i32* @c, align 4
+  store i32 %cond10, ptr @z3, align 4
+  %12 = load i32, ptr @a, align 4
+  %13 = load i32, ptr @c, align 4
   %cmp11 = icmp ne i32 %12, %13
   br i1 %cmp11, label %cond.true12, label %cond.false13
 
 cond.true12:                                      ; preds = %cond.end9
-  %14 = load i32, i32* @t, align 4
+  %14 = load i32, ptr @t, align 4
   br label %cond.end14
 
 cond.false13:                                     ; preds = %cond.end9
-  %15 = load i32, i32* @f, align 4
+  %15 = load i32, ptr @f, align 4
   br label %cond.end14
 
 cond.end14:                                       ; preds = %cond.false13, %cond.true12
   %cond15 = phi i32 [ %14, %cond.true12 ], [ %15, %cond.false13 ]
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/Mips/selnek.ll b/llvm/test/CodeGen/Mips/selnek.ll
index 13ab693adb8b1..f38ab246e60f4 100644
--- a/llvm/test/CodeGen/Mips/selnek.ll
+++ b/llvm/test/CodeGen/Mips/selnek.ll
@@ -12,84 +12,84 @@
 
 define void @calc_z() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp ne i32 %0, 1
   br i1 %cmp, label %cond.true, label %cond.false
 
 cond.true:                                        ; preds = %entry
-  %1 = load i32, i32* @f, align 4
+  %1 = load i32, ptr @f, align 4
   br label %cond.end
 
 cond.false:                                       ; preds = %entry
-  %2 = load i32, i32* @t, align 4
+  %2 = load i32, ptr @t, align 4
   br label %cond.end
 
 cond.end:                                         ; preds = %cond.false, %cond.true
   %cond = phi i32 [ %1, %cond.true ], [ %2, %cond.false ]
-  store i32 %cond, i32* @z1, align 4
-  %3 = load i32, i32* @a, align 4
+  store i32 %cond, ptr @z1, align 4
+  %3 = load i32, ptr @a, align 4
   %cmp1 = icmp ne i32 %3, 1000
   br i1 %cmp1, label %cond.true2, label %cond.false3
 
 cond.true2:                                       ; preds = %cond.end
-  %4 = load i32, i32* @t, align 4
+  %4 = load i32, ptr @t, align 4
   br label %cond.end4
 
 cond.false3:                                      ; preds = %cond.end
-  %5 = load i32, i32* @f, align 4
+  %5 = load i32, ptr @f, align 4
   br label %cond.end4
 
 cond.end4:                                        ; preds = %cond.false3, %cond.true2
   %cond5 = phi i32 [ %4, %cond.true2 ], [ %5, %cond.false3 ]
-  store i32 %cond5, i32* @z2, align 4
-  %6 = load i32, i32* @b, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %6 = load i32, ptr @b, align 4
   %cmp6 = icmp ne i32 %6, 3
   br i1 %cmp6, label %cond.true7, label %cond.false8
 
 cond.true7:                                       ; preds = %cond.end4
-  %7 = load i32, i32* @t, align 4
+  %7 = load i32, ptr @t, align 4
   br label %cond.end9
 
 cond.false8:                                      ; preds = %cond.end4
-  %8 = load i32, i32* @f, align 4
+  %8 = load i32, ptr @f, align 4
   br label %cond.end9
 
 cond.end9:                                        ; preds = %cond.false8, %cond.true7
   %cond10 = phi i32 [ %7, %cond.true7 ], [ %8, %cond.false8 ]
-  store i32 %cond10, i32* @z3, align 4
-  %9 = load i32, i32* @b, align 4
+  store i32 %cond10, ptr @z3, align 4
+  %9 = load i32, ptr @b, align 4
   %cmp11 = icmp ne i32 %9, 1000
   br i1 %cmp11, label %cond.true12, label %cond.false13
 
 cond.true12:                                      ; preds = %cond.end9
-  %10 = load i32, i32* @f, align 4
+  %10 = load i32, ptr @f, align 4
   br label %cond.end14
 
 cond.false13:                                     ; preds = %cond.end9
-  %11 = load i32, i32* @t, align 4
+  %11 = load i32, ptr @t, align 4
   br label %cond.end14
 
 cond.end14:                                       ; preds = %cond.false13, %cond.true12
   %cond15 = phi i32 [ %10, %cond.true12 ], [ %11, %cond.false13 ]
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 
 define i32 @main() nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" {
 entry:
   call void @calc_z() "target-cpu"="mips16" "target-features"="+mips16,+o32"
-  %0 = load i32, i32* @z1, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %0) "target-cpu"="mips16" "target-features"="+mips16,+o32"
-  %1 = load i32, i32* @z2, align 4
-  %call1 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1) "target-cpu"="mips16" "target-features"="+mips16,+o32"
-  %2 = load i32, i32* @z3, align 4
-  %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %2) "target-cpu"="mips16" "target-features"="+mips16,+o32"
-  %3 = load i32, i32* @z4, align 4
-  %call3 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %3) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+  %0 = load i32, ptr @z1, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %0) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+  %1 = load i32, ptr @z2, align 4
+  %call1 = call i32 (ptr, ...) @printf(ptr @.str, i32 %1) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+  %2 = load i32, ptr @z3, align 4
+  %call2 = call i32 (ptr, ...) @printf(ptr @.str, i32 %2) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+  %3 = load i32, ptr @z4, align 4
+  %call3 = call i32 (ptr, ...) @printf(ptr @.str, i32 %3) "target-cpu"="mips16" "target-features"="+mips16,+o32"
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...) "target-cpu"="mips16" "target-features"="+mips16,+o32"
+declare i32 @printf(ptr, ...) "target-cpu"="mips16" "target-features"="+mips16,+o32"
 
 attributes #0 = { nounwind "target-cpu"="mips16" "target-features"="+mips16,+o32" }
 attributes #1 = { "target-cpu"="mips16" "target-features"="+mips16,+o32" }

diff --git a/llvm/test/CodeGen/Mips/selpat.ll b/llvm/test/CodeGen/Mips/selpat.ll
index ff4bed327f456..d765acbab33a1 100644
--- a/llvm/test/CodeGen/Mips/selpat.ll
+++ b/llvm/test/CodeGen/Mips/selpat.ll
@@ -12,339 +12,339 @@
 
 define void @calc_seleq() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp eq i32 %0, %1
-  %2 = load i32, i32* @f, align 4
-  %3 = load i32, i32* @t, align 4
+  %2 = load i32, ptr @f, align 4
+  %3 = load i32, ptr @t, align 4
   %cond = select i1 %cmp, i32 %2, i32 %3
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	cmp	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	bteqz	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
-  store i32 %cond, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp eq i32 %4, %0
   %cond10 = select i1 %cmp6, i32 %3, i32 %2
-  store i32 %cond10, i32* @z3, align 4
-  store i32 %cond10, i32* @z4, align 4
+  store i32 %cond10, ptr @z3, align 4
+  store i32 %cond10, ptr @z4, align 4
   ret void
 }
 
 
 define void @calc_seleqk() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp eq i32 %0, 1
-  %1 = load i32, i32* @t, align 4
-  %2 = load i32, i32* @f, align 4
+  %1 = load i32, ptr @t, align 4
+  %2 = load i32, ptr @f, align 4
   %cond = select i1 %cmp, i32 %1, i32 %2
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	cmpi	${{[0-9]+}}, 1
 ; 16:	bteqz	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
   %cmp1 = icmp eq i32 %0, 10
   %cond5 = select i1 %cmp1, i32 %2, i32 %1
-  store i32 %cond5, i32* @z2, align 4
-  %3 = load i32, i32* @b, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %3 = load i32, ptr @b, align 4
   %cmp6 = icmp eq i32 %3, 3
   %cond10 = select i1 %cmp6, i32 %2, i32 %1
-  store i32 %cond10, i32* @z3, align 4
+  store i32 %cond10, ptr @z3, align 4
 ; 16:	cmpi	${{[0-9]+}}, 10
 ; 16:	bteqz	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
   %cmp11 = icmp eq i32 %3, 10
   %cond15 = select i1 %cmp11, i32 %1, i32 %2
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 
 define void @calc_seleqz() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp eq i32 %0, 0
-  %1 = load i32, i32* @t, align 4
-  %2 = load i32, i32* @f, align 4
+  %1 = load i32, ptr @t, align 4
+  %2 = load i32, ptr @f, align 4
   %cond = select i1 %cmp, i32 %1, i32 %2
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	beqz	${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
-  %3 = load i32, i32* @b, align 4
+  %3 = load i32, ptr @b, align 4
   %cmp1 = icmp eq i32 %3, 0
   %cond5 = select i1 %cmp1, i32 %2, i32 %1
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp eq i32 %4, 0
   %cond10 = select i1 %cmp6, i32 %1, i32 %2
-  store i32 %cond10, i32* @z3, align 4
-  store i32 %cond, i32* @z4, align 4
+  store i32 %cond10, ptr @z3, align 4
+  store i32 %cond, ptr @z4, align 4
   ret void
 }
 
 define void @calc_selge() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp sge i32 %0, %1
-  %2 = load i32, i32* @f, align 4
-  %3 = load i32, i32* @t, align 4
+  %2 = load i32, ptr @f, align 4
+  %3 = load i32, ptr @t, align 4
   %cond = select i1 %cmp, i32 %2, i32 %3
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	slt	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	bteqz	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
   %cmp1 = icmp sge i32 %1, %0
   %cond5 = select i1 %cmp1, i32 %3, i32 %2
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp sge i32 %4, %0
   %cond10 = select i1 %cmp6, i32 %3, i32 %2
-  store i32 %cond10, i32* @z3, align 4
+  store i32 %cond10, ptr @z3, align 4
   %cmp11 = icmp sge i32 %0, %4
   %cond15 = select i1 %cmp11, i32 %3, i32 %2
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 
 define i32 @calc_selgt() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp sgt i32 %0, %1
 ; 16:	slt	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	btnez	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
-  %2 = load i32, i32* @f, align 4
-  %3 = load i32, i32* @t, align 4
+  %2 = load i32, ptr @f, align 4
+  %3 = load i32, ptr @t, align 4
   %cond = select i1 %cmp, i32 %2, i32 %3
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
   %cmp1 = icmp sgt i32 %1, %0
   %cond5 = select i1 %cmp1, i32 %3, i32 %2
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp sgt i32 %4, %0
   %cond10 = select i1 %cmp6, i32 %2, i32 %3
-  store i32 %cond10, i32* @z3, align 4
+  store i32 %cond10, ptr @z3, align 4
   %cmp11 = icmp sgt i32 %0, %4
   %cond15 = select i1 %cmp11, i32 %2, i32 %3
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret i32 undef
 }
 
 define void @calc_selle() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp sle i32 %0, %1
-  %2 = load i32, i32* @t, align 4
-  %3 = load i32, i32* @f, align 4
+  %2 = load i32, ptr @t, align 4
+  %3 = load i32, ptr @f, align 4
   %cond = select i1 %cmp, i32 %2, i32 %3
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	slt	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	bteqz	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
   %cmp1 = icmp sle i32 %1, %0
   %cond5 = select i1 %cmp1, i32 %3, i32 %2
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp sle i32 %4, %0
   %cond10 = select i1 %cmp6, i32 %2, i32 %3
-  store i32 %cond10, i32* @z3, align 4
+  store i32 %cond10, ptr @z3, align 4
   %cmp11 = icmp sle i32 %0, %4
   %cond15 = select i1 %cmp11, i32 %2, i32 %3
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 
 define void @calc_selltk() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp slt i32 %0, 10
-  %1 = load i32, i32* @t, align 4
-  %2 = load i32, i32* @f, align 4
+  %1 = load i32, ptr @t, align 4
+  %2 = load i32, ptr @f, align 4
   %cond = select i1 %cmp, i32 %1, i32 %2
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	slti	${{[0-9]+}}, {{[0-9]+}}
 ; 16:	btnez	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
-  %3 = load i32, i32* @b, align 4
+  %3 = load i32, ptr @b, align 4
   %cmp1 = icmp slt i32 %3, 2
   %cond5 = select i1 %cmp1, i32 %2, i32 %1
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp sgt i32 %4, 2
   %cond10 = select i1 %cmp6, i32 %2, i32 %1
-  store i32 %cond10, i32* @z3, align 4
+  store i32 %cond10, ptr @z3, align 4
   %cmp11 = icmp sgt i32 %0, 2
   %cond15 = select i1 %cmp11, i32 %2, i32 %1
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 
 
 define void @calc_selne() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp ne i32 %0, %1
-  %2 = load i32, i32* @t, align 4
-  %3 = load i32, i32* @f, align 4
+  %2 = load i32, ptr @t, align 4
+  %3 = load i32, ptr @f, align 4
   %cond = select i1 %cmp, i32 %2, i32 %3
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	cmp	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	btnez	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
-  store i32 %cond, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp ne i32 %4, %0
   %cond10 = select i1 %cmp6, i32 %3, i32 %2
-  store i32 %cond10, i32* @z3, align 4
-  store i32 %cond10, i32* @z4, align 4
+  store i32 %cond10, ptr @z3, align 4
+  store i32 %cond10, ptr @z4, align 4
   ret void
 }
 
 define void @calc_selnek() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp ne i32 %0, 1
-  %1 = load i32, i32* @f, align 4
-  %2 = load i32, i32* @t, align 4
+  %1 = load i32, ptr @f, align 4
+  %2 = load i32, ptr @t, align 4
   %cond = select i1 %cmp, i32 %1, i32 %2
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	cmpi	${{[0-9]+}}, 1
 ; 16:	btnez	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
   %cmp1 = icmp ne i32 %0, 10
   %cond5 = select i1 %cmp1, i32 %2, i32 %1
-  store i32 %cond5, i32* @z2, align 4
-  %3 = load i32, i32* @b, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %3 = load i32, ptr @b, align 4
   %cmp6 = icmp ne i32 %3, 3
   %cond10 = select i1 %cmp6, i32 %2, i32 %1
-  store i32 %cond10, i32* @z3, align 4
+  store i32 %cond10, ptr @z3, align 4
 ; 16:	cmpi	${{[0-9]+}}, 10
 ; 16:	btnez	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
   %cmp11 = icmp ne i32 %3, 10
   %cond15 = select i1 %cmp11, i32 %1, i32 %2
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 
 define void @calc_selnez() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %cmp = icmp ne i32 %0, 0
-  %1 = load i32, i32* @f, align 4
-  %2 = load i32, i32* @t, align 4
+  %1 = load i32, ptr @f, align 4
+  %2 = load i32, ptr @t, align 4
   %cond = select i1 %cmp, i32 %1, i32 %2
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	bnez	${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
-  %3 = load i32, i32* @b, align 4
+  %3 = load i32, ptr @b, align 4
   %cmp1 = icmp ne i32 %3, 0
   %cond5 = select i1 %cmp1, i32 %2, i32 %1
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp ne i32 %4, 0
   %cond10 = select i1 %cmp6, i32 %1, i32 %2
-  store i32 %cond10, i32* @z3, align 4
-  store i32 %cond, i32* @z4, align 4
+  store i32 %cond10, ptr @z3, align 4
+  store i32 %cond, ptr @z4, align 4
   ret void
 }
 
 define void @calc_selnez2() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
   %tobool = icmp ne i32 %0, 0
-  %1 = load i32, i32* @f, align 4
-  %2 = load i32, i32* @t, align 4
+  %1 = load i32, ptr @f, align 4
+  %2 = load i32, ptr @t, align 4
   %cond = select i1 %tobool, i32 %1, i32 %2
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	bnez	${{[0-9]+}}, $BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
-  %3 = load i32, i32* @b, align 4
+  %3 = load i32, ptr @b, align 4
   %tobool1 = icmp ne i32 %3, 0
   %cond5 = select i1 %tobool1, i32 %2, i32 %1
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %tobool6 = icmp ne i32 %4, 0
   %cond10 = select i1 %tobool6, i32 %1, i32 %2
-  store i32 %cond10, i32* @z3, align 4
-  store i32 %cond, i32* @z4, align 4
+  store i32 %cond10, ptr @z3, align 4
+  store i32 %cond, ptr @z4, align 4
   ret void
 }
 
 define void @calc_seluge() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp uge i32 %0, %1
-  %2 = load i32, i32* @f, align 4
-  %3 = load i32, i32* @t, align 4
+  %2 = load i32, ptr @f, align 4
+  %3 = load i32, ptr @t, align 4
   %cond = select i1 %cmp, i32 %2, i32 %3
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	sltu	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	bteqz	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
   %cmp1 = icmp uge i32 %1, %0
   %cond5 = select i1 %cmp1, i32 %3, i32 %2
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp uge i32 %4, %0
   %cond10 = select i1 %cmp6, i32 %3, i32 %2
-  store i32 %cond10, i32* @z3, align 4
+  store i32 %cond10, ptr @z3, align 4
   %cmp11 = icmp uge i32 %0, %4
   %cond15 = select i1 %cmp11, i32 %3, i32 %2
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 
 define void @calc_selugt() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp ugt i32 %0, %1
-  %2 = load i32, i32* @f, align 4
-  %3 = load i32, i32* @t, align 4
+  %2 = load i32, ptr @f, align 4
+  %3 = load i32, ptr @t, align 4
   %cond = select i1 %cmp, i32 %2, i32 %3
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	sltu	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	btnez	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
   %cmp1 = icmp ugt i32 %1, %0
   %cond5 = select i1 %cmp1, i32 %3, i32 %2
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp ugt i32 %4, %0
   %cond10 = select i1 %cmp6, i32 %2, i32 %3
-  store i32 %cond10, i32* @z3, align 4
+  store i32 %cond10, ptr @z3, align 4
   %cmp11 = icmp ugt i32 %0, %4
   %cond15 = select i1 %cmp11, i32 %2, i32 %3
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }
 
 define void @calc_selule() nounwind {
 entry:
-  %0 = load i32, i32* @a, align 4
-  %1 = load i32, i32* @b, align 4
+  %0 = load i32, ptr @a, align 4
+  %1 = load i32, ptr @b, align 4
   %cmp = icmp ule i32 %0, %1
-  %2 = load i32, i32* @t, align 4
-  %3 = load i32, i32* @f, align 4
+  %2 = load i32, ptr @t, align 4
+  %3 = load i32, ptr @f, align 4
   %cond = select i1 %cmp, i32 %2, i32 %3
-  store i32 %cond, i32* @z1, align 4
+  store i32 %cond, ptr @z1, align 4
 ; 16:	sltu	${{[0-9]+}}, ${{[0-9]+}}
 ; 16:	bteqz	$BB{{[0-9]+}}_{{[0-9]}}
 ; 16: 	move    ${{[0-9]+}}, ${{[0-9]+}}
   %cmp1 = icmp ule i32 %1, %0
   %cond5 = select i1 %cmp1, i32 %3, i32 %2
-  store i32 %cond5, i32* @z2, align 4
-  %4 = load i32, i32* @c, align 4
+  store i32 %cond5, ptr @z2, align 4
+  %4 = load i32, ptr @c, align 4
   %cmp6 = icmp ule i32 %4, %0
   %cond10 = select i1 %cmp6, i32 %2, i32 %3
-  store i32 %cond10, i32* @z3, align 4
+  store i32 %cond10, ptr @z3, align 4
   %cmp11 = icmp ule i32 %0, %4
   %cond15 = select i1 %cmp11, i32 %2, i32 %3
-  store i32 %cond15, i32* @z4, align 4
+  store i32 %cond15, ptr @z4, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/setcc-se.ll b/llvm/test/CodeGen/Mips/setcc-se.ll
index b6c6b81943a68..0e74db3c67866 100644
--- a/llvm/test/CodeGen/Mips/setcc-se.ll
+++ b/llvm/test/CodeGen/Mips/setcc-se.ll
@@ -39,7 +39,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %a, i32* @g1, align 4
+  store i32 %a, ptr @g1, align 4
   br label %if.end
 
 if.end:
@@ -57,7 +57,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %a, i32* @g1, align 4
+  store i32 %a, ptr @g1, align 4
   br label %if.end
 
 if.end:
@@ -76,7 +76,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %a, i32* @g1, align 4
+  store i32 %a, ptr @g1, align 4
   br label %if.end
 
 if.end:
@@ -94,7 +94,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %a, i32* @g1, align 4
+  store i32 %a, ptr @g1, align 4
   br label %if.end
 
 if.end:
@@ -113,7 +113,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %a, i32* @g1, align 4
+  store i32 %a, ptr @g1, align 4
   br label %if.end
 
 if.end:
@@ -131,7 +131,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %a, i32* @g1, align 4
+  store i32 %a, ptr @g1, align 4
   br label %if.end
 
 if.end:
@@ -150,7 +150,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %a, i32* @g1, align 4
+  store i32 %a, ptr @g1, align 4
   br label %if.end
 
 if.end:
@@ -168,7 +168,7 @@ entry:
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:
-  store i32 %a, i32* @g1, align 4
+  store i32 %a, ptr @g1, align 4
   br label %if.end
 
 if.end:

diff --git a/llvm/test/CodeGen/Mips/seteq.ll b/llvm/test/CodeGen/Mips/seteq.ll
index 0f0850f87aab9..37fd1634f796a 100644
--- a/llvm/test/CodeGen/Mips/seteq.ll
+++ b/llvm/test/CodeGen/Mips/seteq.ll
@@ -9,11 +9,11 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @k, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @k, align 4
   %cmp = icmp eq i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   xor     $[[REGISTER:[0-9A-Ba-b_]+]], ${{[0-9]+}}
 ; 16:   sltiu   $[[REGISTER:[0-9A-Ba-b_]+]], 1
 ; MMR6: sltiu   ${{[0-9]+}}, ${{[0-9]+}}, 1

diff --git a/llvm/test/CodeGen/Mips/seteqz.ll b/llvm/test/CodeGen/Mips/seteqz.ll
index e6111453c25d6..c71ed1d91f72b 100644
--- a/llvm/test/CodeGen/Mips/seteqz.ll
+++ b/llvm/test/CodeGen/Mips/seteqz.ll
@@ -8,17 +8,17 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %cmp = icmp eq i32 %0, 0
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   sltiu   ${{[0-9]+}}, 1
 ; MMR6: sltiu   ${{[0-9]+}}, ${{[0-9]+}}, 1
 ; 16:   move    ${{[0-9]+}}, $24
-  %1 = load i32, i32* @j, align 4
+  %1 = load i32, ptr @j, align 4
   %cmp1 = icmp eq i32 %1, 99
   %conv2 = zext i1 %cmp1 to i32
-  store i32 %conv2, i32* @r2, align 4
+  store i32 %conv2, ptr @r2, align 4
 ; 16:   xor     $[[REGISTER:[0-9A-Ba-b_]+]], ${{[0-9]+}}
 ; 16:   sltiu   $[[REGISTER:[0-9A-Ba-b_]+]], 1
 ; MMR6: sltiu   ${{[0-9]+}}, ${{[0-9]+}}, 1

diff --git a/llvm/test/CodeGen/Mips/setge.ll b/llvm/test/CodeGen/Mips/setge.ll
index 0809b6f1b0294..6fb549e24b3fa 100644
--- a/llvm/test/CodeGen/Mips/setge.ll
+++ b/llvm/test/CodeGen/Mips/setge.ll
@@ -12,18 +12,18 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @k, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @k, align 4
+  %1 = load i32, ptr @j, align 4
   %cmp = icmp sge i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   slt   ${{[0-9]+}}, ${{[0-9]+}}
 ; MMR6: slt   ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:   move  $[[REGISTER:[0-9]+]], $24
 ; 16:   xor   $[[REGISTER]], ${{[0-9]+}}
-  %2 = load i32, i32* @m, align 4
+  %2 = load i32, ptr @m, align 4
   %cmp1 = icmp sge i32 %0, %2
   %conv2 = zext i1 %cmp1 to i32
-  store i32 %conv2, i32* @r2, align 4
+  store i32 %conv2, ptr @r2, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/setgek.ll b/llvm/test/CodeGen/Mips/setgek.ll
index 99d8eb22afbc0..661a3f160e325 100644
--- a/llvm/test/CodeGen/Mips/setgek.ll
+++ b/llvm/test/CodeGen/Mips/setgek.ll
@@ -8,10 +8,10 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @k, align 4
+  %0 = load i32, ptr @k, align 4
   %cmp = icmp sgt i32 %0, -32769
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   slti    ${{[0-9]+}}, -32768
 ; MMR6: slt     ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:   move    ${{[0-9]+}}, $24

diff --git a/llvm/test/CodeGen/Mips/setle.ll b/llvm/test/CodeGen/Mips/setle.ll
index 31e2d62ac4c61..35453b8f5f6af 100644
--- a/llvm/test/CodeGen/Mips/setle.ll
+++ b/llvm/test/CodeGen/Mips/setle.ll
@@ -11,18 +11,18 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
-  %1 = load i32, i32* @k, align 4
+  %0 = load i32, ptr @j, align 4
+  %1 = load i32, ptr @k, align 4
   %cmp = icmp sle i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   slt   ${{[0-9]+}}, ${{[0-9]+}}
 ; MMR6: slt   ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:   move  $[[REGISTER:[0-9]+]], $24
 ; 16:   xor   $[[REGISTER]], ${{[0-9]+}}
-  %2 = load i32, i32* @m, align 4
+  %2 = load i32, ptr @m, align 4
   %cmp1 = icmp sle i32 %2, %1
   %conv2 = zext i1 %cmp1 to i32
-  store i32 %conv2, i32* @r2, align 4
+  store i32 %conv2, ptr @r2, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/setlt.ll b/llvm/test/CodeGen/Mips/setlt.ll
index 77ca71ee178b4..fcf9a25be72c0 100644
--- a/llvm/test/CodeGen/Mips/setlt.ll
+++ b/llvm/test/CodeGen/Mips/setlt.ll
@@ -11,11 +11,11 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
-  %1 = load i32, i32* @k, align 4
+  %0 = load i32, ptr @j, align 4
+  %1 = load i32, ptr @k, align 4
   %cmp = icmp slt i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   slt     ${{[0-9]+}}, ${{[0-9]+}}
 ; MMR6: slt     ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:   move    ${{[0-9]+}}, $24

diff --git a/llvm/test/CodeGen/Mips/setltk.ll b/llvm/test/CodeGen/Mips/setltk.ll
index aefe48bd8d96c..95aa9ea07d245 100644
--- a/llvm/test/CodeGen/Mips/setltk.ll
+++ b/llvm/test/CodeGen/Mips/setltk.ll
@@ -11,10 +11,10 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @j, align 4
   %cmp = icmp slt i32 %0, 10
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   slti    $[[REGISTER:[0-9]+]], 10
 ; MMR6: slti    $[[REGISTER:[0-9]+]], $[[REGISTER:[0-9]+]], 10
 ; 16:   move    $[[REGISTER]], $24

diff --git a/llvm/test/CodeGen/Mips/setne.ll b/llvm/test/CodeGen/Mips/setne.ll
index c2c0f1a2f97ec..d9396b1bf7754 100644
--- a/llvm/test/CodeGen/Mips/setne.ll
+++ b/llvm/test/CodeGen/Mips/setne.ll
@@ -9,11 +9,11 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @k, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @k, align 4
   %cmp = icmp ne i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   xor     $[[REGISTER:[0-9]+]], ${{[0-9]+}}
 ; 16:   sltu    ${{[0-9]+}}, $[[REGISTER]]
 ; MMR6: sltu    ${{[0-9]+}}, $zero, ${{[0-9]+}}

diff --git a/llvm/test/CodeGen/Mips/setuge.ll b/llvm/test/CodeGen/Mips/setuge.ll
index dd9e5b5a2eaf3..afdd4ef733758 100644
--- a/llvm/test/CodeGen/Mips/setuge.ll
+++ b/llvm/test/CodeGen/Mips/setuge.ll
@@ -11,18 +11,18 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @k, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @k, align 4
+  %1 = load i32, ptr @j, align 4
   %cmp = icmp uge i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   sltu  ${{[0-9]+}}, ${{[0-9]+}}
 ; MMR6: sltu  ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:   move  $[[REGISTER:[0-9]+]], $24
 ; 16:   xor   $[[REGISTER]], ${{[0-9]+}}
-  %2 = load i32, i32* @m, align 4
+  %2 = load i32, ptr @m, align 4
   %cmp1 = icmp uge i32 %0, %2
   %conv2 = zext i1 %cmp1 to i32
-  store i32 %conv2, i32* @r2, align 4
+  store i32 %conv2, ptr @r2, align 4
   ret void
 }

diff --git a/llvm/test/CodeGen/Mips/setugt.ll b/llvm/test/CodeGen/Mips/setugt.ll
index 95854851c73fd..afe4e4b09c781 100644
--- a/llvm/test/CodeGen/Mips/setugt.ll
+++ b/llvm/test/CodeGen/Mips/setugt.ll
@@ -11,11 +11,11 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @k, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @k, align 4
+  %1 = load i32, ptr @j, align 4
   %cmp = icmp ugt i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   sltu    ${{[0-9]+}}, ${{[0-9]+}}
 ; MMR6: sltu    ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:   move    ${{[0-9]+}}, $24

diff --git a/llvm/test/CodeGen/Mips/setule.ll b/llvm/test/CodeGen/Mips/setule.ll
index ef18c62db4c92..024e7701fad4d 100644
--- a/llvm/test/CodeGen/Mips/setule.ll
+++ b/llvm/test/CodeGen/Mips/setule.ll
@@ -11,18 +11,18 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
-  %1 = load i32, i32* @k, align 4
+  %0 = load i32, ptr @j, align 4
+  %1 = load i32, ptr @k, align 4
   %cmp = icmp ule i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   sltu  ${{[0-9]+}}, ${{[0-9]+}}
 ; MMR6: sltu  ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:   move  $[[REGISTER:[0-9]+]], $24
 ; 16:   xor   $[[REGISTER]], ${{[0-9]+}}
-  %2 = load i32, i32* @m, align 4
+  %2 = load i32, ptr @m, align 4
   %cmp1 = icmp ule i32 %2, %1
   %conv2 = zext i1 %cmp1 to i32
-  store i32 %conv2, i32* @r2, align 4
+  store i32 %conv2, ptr @r2, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/setult.ll b/llvm/test/CodeGen/Mips/setult.ll
index b03e26736f949..ef354ec0971e4 100644
--- a/llvm/test/CodeGen/Mips/setult.ll
+++ b/llvm/test/CodeGen/Mips/setult.ll
@@ -11,11 +11,11 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
-  %1 = load i32, i32* @k, align 4
+  %0 = load i32, ptr @j, align 4
+  %1 = load i32, ptr @k, align 4
   %cmp = icmp ult i32 %0, %1
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   sltu    ${{[0-9]+}}, ${{[0-9]+}}
 ; MMR6: sltu    ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16:   move    ${{[0-9]+}}, $24

diff  --git a/llvm/test/CodeGen/Mips/setultk.ll b/llvm/test/CodeGen/Mips/setultk.ll
index a0b0bbf77cc41..9801d388d2232 100644
--- a/llvm/test/CodeGen/Mips/setultk.ll
+++ b/llvm/test/CodeGen/Mips/setultk.ll
@@ -11,10 +11,10 @@
 
 define void @test() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @j, align 4
   %cmp = icmp ult i32 %0, 10
   %conv = zext i1 %cmp to i32
-  store i32 %conv, i32* @r1, align 4
+  store i32 %conv, ptr @r1, align 4
 ; 16:   sltiu   ${{[0-9]+}}, 10 # 16 bit inst
 ; MMR6: sltiu   ${{[0-9]+}}, ${{[0-9]+}}, 1
 ; 16:   move    ${{[0-9]+}}, $24

diff  --git a/llvm/test/CodeGen/Mips/sh1.ll b/llvm/test/CodeGen/Mips/sh1.ll
index ccba32a4cca9d..6bfa9f56231f5 100644
--- a/llvm/test/CodeGen/Mips/sh1.ll
+++ b/llvm/test/CodeGen/Mips/sh1.ll
@@ -6,15 +6,15 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %conv = trunc i32 %0 to i16
-  store i16 %conv, i16* @s, align 2
-  %1 = load i32, i32* @i, align 4
-  %2 = load i16, i16* @s, align 2
+  store i16 %conv, ptr @s, align 2
+  %1 = load i32, ptr @i, align 4
+  %2 = load i16, ptr @s, align 2
   %conv1 = sext i16 %2 to i32
 ; 16:	sh	${{[0-9]+}}, 0(${{[0-9]+}})
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %1, i32 %conv1)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1, i32 %conv1)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/simplebr.ll b/llvm/test/CodeGen/Mips/simplebr.ll
index 670ab7a0af119..ba97bdd89c179 100644
--- a/llvm/test/CodeGen/Mips/simplebr.ll
+++ b/llvm/test/CodeGen/Mips/simplebr.ll
@@ -9,16 +9,16 @@ target triple = "mips--linux-gnu"
 ; Function Attrs: nounwind
 define void @foo() #0 {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %tobool = icmp ne i32 %0, 0
   br i1 %tobool, label %if.then, label %if.else
 
 if.then:                                          ; preds = %entry
-  call void bitcast (void (...)* @goo to void ()*)()
+  call void @goo()
   br label %if.end
 
 if.else:                                          ; preds = %entry
-  call void bitcast (void (...)* @hoo to void ()*)()
+  call void @hoo()
   br label %if.end
 
 if.end:                                           ; preds = %if.else, %if.then

diff  --git a/llvm/test/CodeGen/Mips/sint-fp-store_pattern.ll b/llvm/test/CodeGen/Mips/sint-fp-store_pattern.ll
index 2735d787432d4..4d15f428e44be 100644
--- a/llvm/test/CodeGen/Mips/sint-fp-store_pattern.ll
+++ b/llvm/test/CodeGen/Mips/sint-fp-store_pattern.ll
@@ -12,7 +12,7 @@
 define void @store_int_float_(float %a) {
 entry:
   %conv = fptosi float %a to i32
-  store i32 %conv, i32* @gint_, align 4
+  store i32 %conv, ptr @gint_, align 4
   ret void
 }
 
@@ -26,7 +26,7 @@ entry:
 define void @store_int_double_(double %a) {
 entry:
   %conv = fptosi double %a to i32
-  store i32 %conv, i32* @gint_, align 4
+  store i32 %conv, ptr @gint_, align 4
   ret void
 }
 
@@ -37,7 +37,7 @@ entry:
 define void @store_LL_float_(float %a) {
 entry:
   %conv = fptosi float %a to i64
-  store i64 %conv, i64* @gLL_, align 8
+  store i64 %conv, ptr @gLL_, align 8
   ret void
 }
 
@@ -48,6 +48,6 @@ entry:
 define void @store_LL_double_(double %a) {
 entry:
   %conv = fptosi double %a to i64
-  store i64 %conv, i64* @gLL_, align 8
+  store i64 %conv, ptr @gLL_, align 8
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/sitofp-selectcc-opt.ll b/llvm/test/CodeGen/Mips/sitofp-selectcc-opt.ll
index 751fba46d72fa..3e2de21b63cae 100644
--- a/llvm/test/CodeGen/Mips/sitofp-selectcc-opt.ll
+++ b/llvm/test/CodeGen/Mips/sitofp-selectcc-opt.ll
@@ -14,8 +14,8 @@ entry:
   %tobool1. = or i1 %tobool1, %not.tobool
   %lor.ext = zext i1 %tobool1. to i32
   %conv = sitofp i32 %lor.ext to double
-  %1 = load double, double* @foo12.d4, align 8
+  %1 = load double, ptr @foo12.d4, align 8
   %add = fadd double %conv, %1
-  store double %add, double* @foo12.d4, align 8
+  store double %add, ptr @foo12.d4, align 8
   ret double %add
 }

diff  --git a/llvm/test/CodeGen/Mips/sll1.ll b/llvm/test/CodeGen/Mips/sll1.ll
index 93b814f944c50..bf36682100d9f 100644
--- a/llvm/test/CodeGen/Mips/sll1.ll
+++ b/llvm/test/CodeGen/Mips/sll1.ll
@@ -7,13 +7,13 @@
 define i32 @main() nounwind {
 entry:
 ; 16:	sll	${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}}
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %shl = shl i32 %0, 4
 ; 16:	sll	${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}}
-  store i32 %shl, i32* @j, align 4
-  %1 = load i32, i32* @j, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+  store i32 %shl, ptr @j, align 4
+  %1 = load i32, ptr @j, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/sll2.ll b/llvm/test/CodeGen/Mips/sll2.ll
index f30108d14df89..5de0b561cc989 100644
--- a/llvm/test/CodeGen/Mips/sll2.ll
+++ b/llvm/test/CodeGen/Mips/sll2.ll
@@ -6,14 +6,14 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @j, align 4
   %shl = shl i32 %0, %1
 ; 16:	sllv	${{[0-9]+}}, ${{[0-9]+}}
-  store i32 %shl, i32* @i, align 4
-  %2 = load i32, i32* @j, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %2)
+  store i32 %shl, ptr @i, align 4
+  %2 = load i32, ptr @j, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %2)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/small-section-reserve-gp.ll b/llvm/test/CodeGen/Mips/small-section-reserve-gp.ll
index c4e37665aaf6f..cbe1218c34215 100644
--- a/llvm/test/CodeGen/Mips/small-section-reserve-gp.ll
+++ b/llvm/test/CodeGen/Mips/small-section-reserve-gp.ll
@@ -6,7 +6,7 @@
 define i32 @geti() nounwind readonly {
 entry:
 ; CHECK: lw ${{[0-9]+}}, %gp_rel(i)($gp)
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   ret i32 %0
 }
 

diff  --git a/llvm/test/CodeGen/Mips/spill-copy-acreg.ll b/llvm/test/CodeGen/Mips/spill-copy-acreg.ll
index fd160b67cf245..ea24bebe3e5f9 100644
--- a/llvm/test/CodeGen/Mips/spill-copy-acreg.ll
+++ b/llvm/test/CodeGen/Mips/spill-copy-acreg.ll
@@ -6,13 +6,13 @@
 
 define i64 @test_acreg_copy(i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
 entry:
-  %0 = load i64, i64* @g1, align 8
+  %0 = load i64, ptr @g1, align 8
   %1 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a0, i32 %a1)
   %2 = tail call i64 @llvm.mips.maddu(i64 %0, i32 %a2, i32 %a3)
-  store i64 %1, i64* @g1, align 8
-  store i64 %2, i64* @g2, align 8
+  store i64 %1, ptr @g1, align 8
+  store i64 %2, ptr @g2, align 8
   tail call void @foo1()
-  store i64 %2, i64* @g3, align 8
+  store i64 %2, ptr @g3, align 8
   ret i64 %1
 }
 
@@ -30,10 +30,10 @@ entry:
   %1 = bitcast i32 %b.coerce to <2 x i16>
   %cmp3 = icmp slt <2 x i16> %0, %1
   %sext = sext <2 x i1> %cmp3 to <2 x i16>
-  store <2 x i16> %sext, <2 x i16>* @g4, align 4
+  store <2 x i16> %sext, ptr @g4, align 4
   tail call void @foo1()
-  %2 = load <2 x i16>, <2 x i16>* @g5, align 4
-  %3 = load <2 x i16>, <2 x i16>* @g6, align 4
+  %2 = load <2 x i16>, ptr @g5, align 4
+  %3 = load <2 x i16>, ptr @g6, align 4
   %or = select <2 x i1> %cmp3, <2 x i16> %2, <2 x i16> %3
   %4 = bitcast <2 x i16> %or to i32
   %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0

diff  --git a/llvm/test/CodeGen/Mips/sr1.ll b/llvm/test/CodeGen/Mips/sr1.ll
index 77a042167e134..47948c488aab2 100644
--- a/llvm/test/CodeGen/Mips/sr1.ll
+++ b/llvm/test/CodeGen/Mips/sr1.ll
@@ -8,10 +8,8 @@
 define void @foo1() #0 {
 entry:
   %c = alloca [10 x i8], align 1
-  %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %c, i32 0, i32 0
-  call void @x(i8* %arraydecay)
-  %arraydecay1 = getelementptr inbounds [10 x i8], [10 x i8]* %c, i32 0, i32 0
-  call void @x(i8* %arraydecay1)
+  call void @x(ptr %c)
+  call void @x(ptr %c)
   ret void
 ; CHECK: 	.ent	foo1
 ; CHECK: 	save	$16, $17, $ra, [[FS:[0-9]+]]  # 16 bit inst
@@ -19,16 +17,14 @@ entry:
 ; CHECK: 	.end	foo1
 }
 
-declare void @x(i8*) #1
+declare void @x(ptr) #1
 
 ; Function Attrs: nounwind
 define void @foo2() #0 {
 entry:
   %c = alloca [150 x i8], align 1
-  %arraydecay = getelementptr inbounds [150 x i8], [150 x i8]* %c, i32 0, i32 0
-  call void @x(i8* %arraydecay)
-  %arraydecay1 = getelementptr inbounds [150 x i8], [150 x i8]* %c, i32 0, i32 0
-  call void @x(i8* %arraydecay1)
+  call void @x(ptr %c)
+  call void @x(ptr %c)
   ret void
 ; CHECK: 	.ent	foo2
 ; CHECK: 	save	$16, $17, $ra, [[FS:[0-9]+]] 
@@ -40,7 +36,7 @@ entry:
 define void @foo3() #0 {
 entry:
   %call = call float @xf()
-  store float %call, float* @f, align 4
+  store float %call, ptr @f, align 4
   ret void
 ; CHECK: 	.ent	foo3
 ; CHECK: 	save	$16, $17, $ra, $18, [[FS:[0-9]+]]

diff  --git a/llvm/test/CodeGen/Mips/sra1.ll b/llvm/test/CodeGen/Mips/sra1.ll
index 51282bd8033d3..08286760d5cda 100644
--- a/llvm/test/CodeGen/Mips/sra1.ll
+++ b/llvm/test/CodeGen/Mips/sra1.ll
@@ -5,11 +5,11 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %shr = ashr i32 %0, 3
 ; 16:	sra	${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}}
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %shr)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %shr)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/sra2.ll b/llvm/test/CodeGen/Mips/sra2.ll
index 0a2bff9e40804..2dd966b0ff843 100644
--- a/llvm/test/CodeGen/Mips/sra2.ll
+++ b/llvm/test/CodeGen/Mips/sra2.ll
@@ -6,12 +6,12 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @j, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @j, align 4
   %shr = ashr i32 %0, %1
 ; 16:	srav	${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %shr)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %shr)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/srl1.ll b/llvm/test/CodeGen/Mips/srl1.ll
index 8e97734bb313c..21cdeda4af448 100644
--- a/llvm/test/CodeGen/Mips/srl1.ll
+++ b/llvm/test/CodeGen/Mips/srl1.ll
@@ -6,13 +6,13 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %shr = lshr i32 %0, 4
 ; 16:	srl	${{[0-9]+}}, ${{[0-9]+}}, {{[0-9]+}}
-  store i32 %shr, i32* @j, align 4
-  %1 = load i32, i32* @j, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %1)
+  store i32 %shr, ptr @j, align 4
+  %1 = load i32, ptr @j, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %1)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/srl2.ll b/llvm/test/CodeGen/Mips/srl2.ll
index 4ccdefdf14137..b433893822dc5 100644
--- a/llvm/test/CodeGen/Mips/srl2.ll
+++ b/llvm/test/CodeGen/Mips/srl2.ll
@@ -7,14 +7,14 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
-  %1 = load i32, i32* @k, align 4
+  %0 = load i32, ptr @i, align 4
+  %1 = load i32, ptr @k, align 4
   %shr = lshr i32 %0, %1
 ; 16:	srlv	${{[0-9]+}}, ${{[0-9]+}}
-  store i32 %shr, i32* @j, align 4
-  %2 = load i32, i32* @j, align 4
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), i32 %2)
+  store i32 %shr, ptr @j, align 4
+  %2 = load i32, ptr @j, align 4
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %2)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/stackcoloring.ll b/llvm/test/CodeGen/Mips/stackcoloring.ll
index 680b3128cc1b4..aeedbc8abccd0 100644
--- a/llvm/test/CodeGen/Mips/stackcoloring.ll
+++ b/llvm/test/CodeGen/Mips/stackcoloring.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -march=mipsel -relocation-model=pic < %s | FileCheck %s
 
-@g1 = external global i32*
+@g1 = external global ptr
 
 ; CHECK-LABEL: foo1:
 ; CHECK: lw ${{[0-9]+}}, %got(g1)
@@ -10,30 +10,28 @@
 define i32 @foo1() {
 entry:
   %b = alloca [16 x i32], align 4
-  %0 = bitcast [16 x i32]* %b to i8*
-  call void @llvm.lifetime.start.p0i8(i64 64, i8* %0)
-  %arraydecay = getelementptr inbounds [16 x i32], [16 x i32]* %b, i32 0, i32 0
+  call void @llvm.lifetime.start.p0(i64 64, ptr %b)
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %entry
   %i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
   %v.04 = phi i32 [ 0, %entry ], [ %add, %for.body ]
-  %1 = load i32*, i32** @g1, align 4
-  %arrayidx = getelementptr inbounds i32, i32* %1, i32 %i.05
-  %2 = load i32, i32* %arrayidx, align 4
-  %call = call i32 @foo2(i32 %2, i32* %arraydecay)
+  %0 = load ptr, ptr @g1, align 4
+  %arrayidx = getelementptr inbounds i32, ptr %0, i32 %i.05
+  %1 = load i32, ptr %arrayidx, align 4
+  %call = call i32 @foo2(i32 %1, ptr %b)
   %add = add nsw i32 %call, %v.04
   %inc = add nsw i32 %i.05, 1
   %exitcond = icmp eq i32 %inc, 10000
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body
-  call void @llvm.lifetime.end.p0i8(i64 64, i8* %0)
+  call void @llvm.lifetime.end.p0(i64 64, ptr %b)
   ret i32 %add
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture)
 
-declare i32 @foo2(i32, i32*)
+declare i32 @foo2(i32, ptr)
 
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture)

diff  --git a/llvm/test/CodeGen/Mips/stchar.ll b/llvm/test/CodeGen/Mips/stchar.ll
index a6021be8e808e..0a2dfbe1183a6 100644
--- a/llvm/test/CodeGen/Mips/stchar.ll
+++ b/llvm/test/CodeGen/Mips/stchar.ll
@@ -2,29 +2,29 @@
 ; RUN: llc  -march=mipsel -mattr=mips16 -relocation-model=pic -O3 < %s | FileCheck %s -check-prefix=16_b
 
 @.str = private unnamed_addr constant [9 x i8] c"%hd %c \0A\00", align 1
-@sp = common global i16* null, align 4
-@cp = common global i8* null, align 4
+@sp = common global ptr null, align 4
+@cp = common global ptr null, align 4
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind
 
 define void @test() nounwind {
 entry:
   %s = alloca i16, align 4
   %c = alloca i8, align 4
-  store i16 16, i16* %s, align 4
-  store i8 99, i8* %c, align 4
-  store i16* %s, i16** @sp, align 4
-  store i8* %c, i8** @cp, align 4
-  %call.i.i = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 16, i32 99) nounwind
-  %0 = load i16*, i16** @sp, align 4
-  store i16 32, i16* %0, align 2
-  %1 = load i8*, i8** @cp, align 4
-  store i8 97, i8* %1, align 1
-  %2 = load i16, i16* %s, align 4
-  %3 = load i8, i8* %c, align 4
+  store i16 16, ptr %s, align 4
+  store i8 99, ptr %c, align 4
+  store ptr %s, ptr @sp, align 4
+  store ptr %c, ptr @cp, align 4
+  %call.i.i = call i32 (ptr, ...) @printf(ptr @.str, i32 16, i32 99) nounwind
+  %0 = load ptr, ptr @sp, align 4
+  store i16 32, ptr %0, align 2
+  %1 = load ptr, ptr @cp, align 4
+  store i8 97, ptr %1, align 1
+  %2 = load i16, ptr %s, align 4
+  %3 = load i8, ptr %c, align 4
   %conv.i = sext i16 %2 to i32
   %conv1.i = sext i8 %3 to i32
-  %call.i = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i32 0, i32 0), i32 %conv.i, i32 %conv1.i) nounwind
+  %call.i = call i32 (ptr, ...) @printf(ptr @.str, i32 %conv.i, i32 %conv1.i) nounwind
   ret void
 ; 16_b-LABEL: test:
 ; 16_h-LABEL: test:
@@ -34,7 +34,7 @@ entry:
 ; 16_h: lh      ${{[0-9]+}}, [[offset2]](${{[0-9]+}})
 }
 
-declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
 
-declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) nounwind
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
 

diff  --git a/llvm/test/CodeGen/Mips/stldst.ll b/llvm/test/CodeGen/Mips/stldst.ll
index 62d5f1f92b450..0208d62b4f8ab 100644
--- a/llvm/test/CodeGen/Mips/stldst.ll
+++ b/llvm/test/CodeGen/Mips/stldst.ll
@@ -12,25 +12,25 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @kkkk, align 4
-  %1 = load i32, i32* @llll, align 4
+  %0 = load i32, ptr @kkkk, align 4
+  %1 = load i32, ptr @llll, align 4
   %add = add nsw i32 %0, 10
   %add1 = add nsw i32 %1, 10
-  %2 = load i32, i32* @mmmm, align 4
+  %2 = load i32, ptr @mmmm, align 4
   %sub = add nsw i32 %2, -3
-  %3 = load i32, i32* @nnnn, align 4
+  %3 = load i32, ptr @nnnn, align 4
   %add2 = add nsw i32 %3, 10
-  %4 = load i32, i32* @oooo, align 4
+  %4 = load i32, ptr @oooo, align 4
   %add3 = add nsw i32 %4, 4
-  %5 = load i32, i32* @pppp, align 4
+  %5 = load i32, ptr @pppp, align 4
   %sub4 = add nsw i32 %5, -5
-  %6 = load i32, i32* @qqqq, align 4
+  %6 = load i32, ptr @qqqq, align 4
   %sub5 = add nsw i32 %6, -10
-  %7 = load i32, i32* @rrrr, align 4
+  %7 = load i32, ptr @rrrr, align 4
   %add6 = add nsw i32 %7, 6
 
-  %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @.str, i32 0, i32 0), i32 %sub5, i32 %add6, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) nounwind
-  %call7 = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @.str, i32 0, i32 0), i32 %0, i32 %1, i32 %add, i32 %add1, i32 %sub, i32 %add2, i32 %add3, i32 %sub4, i32 %sub5, i32 %add6) nounwind
+  %call = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %sub5, i32 %add6, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) nounwind
+  %call7 = tail call i32 (ptr, ...) @printf(ptr @.str, i32 %0, i32 %1, i32 %add, i32 %add1, i32 %sub, i32 %add2, i32 %add3, i32 %sub4, i32 %sub5, i32 %add6) nounwind
   ret i32 0
 }
 ; 16:	sw	${{[0-9]+}}, {{[0-9]+}}($sp)         # 4-byte Folded Spill
@@ -38,4 +38,4 @@ entry:
 ; 16:	sw	${{[0-9]+}}, {{[0-9]+}}($sp)         # 4-byte Folded Spill
 ; 16:	lw	${{[0-9]+}}, {{[0-9]+}}($sp)         # 4-byte Folded Reload
 
-declare i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(ptr nocapture, ...) nounwind

diff  --git a/llvm/test/CodeGen/Mips/sub1.ll b/llvm/test/CodeGen/Mips/sub1.ll
index a5e6988402649..8e30a1732ce4e 100644
--- a/llvm/test/CodeGen/Mips/sub1.ll
+++ b/llvm/test/CodeGen/Mips/sub1.ll
@@ -5,11 +5,11 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @i, align 4
   %sub = sub nsw i32 %0, 5
 ; 16:	addiu	${{[0-9]+}}, -{{[0-9]+}}
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %sub)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %sub)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/sub2.ll b/llvm/test/CodeGen/Mips/sub2.ll
index d10cddb9e6b9e..5d6296d279e46 100644
--- a/llvm/test/CodeGen/Mips/sub2.ll
+++ b/llvm/test/CodeGen/Mips/sub2.ll
@@ -6,12 +6,12 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @j, align 4
-  %1 = load i32, i32* @i, align 4
+  %0 = load i32, ptr @j, align 4
+  %1 = load i32, ptr @i, align 4
   %sub = sub nsw i32 %0, %1
 ; 16:	subu	${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %sub)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %sub)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/swzero.ll b/llvm/test/CodeGen/Mips/swzero.ll
index 9aaee1509806f..cc17a1559db96 100644
--- a/llvm/test/CodeGen/Mips/swzero.ll
+++ b/llvm/test/CodeGen/Mips/swzero.ll
@@ -2,19 +2,18 @@
 
 %struct.unaligned = type <{ i32 }>
 
-define void @zero_u(%struct.unaligned* nocapture %p) nounwind {
+define void @zero_u(ptr nocapture %p) nounwind {
 entry:
 ; CHECK: swl $zero
 ; CHECK: swr $zero
-  %x = getelementptr inbounds %struct.unaligned, %struct.unaligned* %p, i32 0, i32 0
-  store i32 0, i32* %x, align 1
+  store i32 0, ptr %p, align 1
   ret void
 }
 
-define void @zero_a(i32* nocapture %p) nounwind {
+define void @zero_a(ptr nocapture %p) nounwind {
 entry:
 ; CHECK: sw $zero
-  store i32 0, i32* %p, align 4
+  store i32 0, ptr %p, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/tail16.ll b/llvm/test/CodeGen/Mips/tail16.ll
index 75a2a827f258a..831de34f8d89d 100644
--- a/llvm/test/CodeGen/Mips/tail16.ll
+++ b/llvm/test/CodeGen/Mips/tail16.ll
@@ -3,7 +3,7 @@
 ; Function Attrs: nounwind optsize
 define float @h()  {
 entry:
-  %call = tail call float bitcast (float (...)* @g to float ()*)() 
+  %call = tail call float @g() 
   ret float %call
 ; CHECK:	.ent	h
 ; CHECK: 	save	$16, $ra, $18, 32

diff  --git a/llvm/test/CodeGen/Mips/tailcall/tailcall.ll b/llvm/test/CodeGen/Mips/tailcall/tailcall.ll
index 40abb3af3940f..3b200780b9f59 100644
--- a/llvm/test/CodeGen/Mips/tailcall/tailcall.ll
+++ b/llvm/test/CodeGen/Mips/tailcall/tailcall.ll
@@ -125,16 +125,16 @@ entry:
 ; STATIC64: j
 ; PIC16: jalrc
 
-  %0 = load i32, i32* @g0, align 4
-  %1 = load i32, i32* @g1, align 4
-  %2 = load i32, i32* @g2, align 4
-  %3 = load i32, i32* @g3, align 4
-  %4 = load i32, i32* @g4, align 4
-  %5 = load i32, i32* @g5, align 4
-  %6 = load i32, i32* @g6, align 4
-  %7 = load i32, i32* @g7, align 4
-  %8 = load i32, i32* @g8, align 4
-  %9 = load i32, i32* @g9, align 4
+  %0 = load i32, ptr @g0, align 4
+  %1 = load i32, ptr @g1, align 4
+  %2 = load i32, ptr @g2, align 4
+  %3 = load i32, ptr @g3, align 4
+  %4 = load i32, ptr @g4, align 4
+  %5 = load i32, ptr @g5, align 4
+  %6 = load i32, ptr @g6, align 4
+  %7 = load i32, ptr @g7, align 4
+  %8 = load i32, ptr @g8, align 4
+  %9 = load i32, ptr @g9, align 4
   %call = tail call fastcc i32 @callee5(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9)
   ret i32 %call
 }
@@ -192,7 +192,7 @@ entry:
 
 @gs1 = external global %struct.S
 
-declare i32 @callee9(%struct.S* byval(%struct.S))
+declare i32 @callee9(ptr byval(%struct.S))
 
 define i32 @caller9_0() nounwind {
 entry:
@@ -223,7 +223,7 @@ entry:
 ; PIC64R6: jalrc $25
 ; PIC16: jalrc
 
-  %call = tail call i32 @callee9(%struct.S* byval(%struct.S) @gs1) nounwind
+  %call = tail call i32 @callee9(ptr byval(%struct.S) @gs1) nounwind
   ret i32 %call
 }
 
@@ -246,7 +246,7 @@ entry:
   ret i32 %call
 }
 
-declare i32 @callee11(%struct.S* byval(%struct.S))
+declare i32 @callee11(ptr byval(%struct.S))
 
 define i32 @caller11() nounwind noinline {
 entry:
@@ -261,15 +261,15 @@ entry:
 ; PIC64R6: jalrc $25
 ; PIC16: jalrc
 
-  %call = tail call i32 @callee11(%struct.S* byval(%struct.S) @gs1) nounwind
+  %call = tail call i32 @callee11(ptr byval(%struct.S) @gs1) nounwind
   ret i32 %call
 }
 
 declare i32 @callee12()
 
-declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
+declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture, i32, i1) nounwind
 
-define i32 @caller12(%struct.S* nocapture byval(%struct.S) %a0) nounwind {
+define i32 @caller12(ptr nocapture byval(%struct.S) %a0) nounwind {
 entry:
 ; ALL-LABEL: caller12:
 ; PIC32: jalr $25
@@ -282,8 +282,7 @@ entry:
 ; PIC64R6: jalrc $25
 ; PIC16: jalrc
 
-  %0 = bitcast %struct.S* %a0 to i8*
-  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 bitcast (%struct.S* @gs1 to i8*), i8* align 4 %0, i32 8, i1 false)
+  tail call void @llvm.memcpy.p0.p0.i32(ptr align 4 @gs1, ptr align 4 %a0, i32 8, i1 false)
   %call = tail call i32 @callee12() nounwind
   ret i32 %call
 }

diff  --git a/llvm/test/CodeGen/Mips/tglobaladdr-wrapper.ll b/llvm/test/CodeGen/Mips/tglobaladdr-wrapper.ll
index 9c702a910e735..d791e37cd41fb 100644
--- a/llvm/test/CodeGen/Mips/tglobaladdr-wrapper.ll
+++ b/llvm/test/CodeGen/Mips/tglobaladdr-wrapper.ll
@@ -56,11 +56,11 @@ define void @foo() {
 ; 64-NEXT:    jr $ra
 ; 64-NEXT:    sw $1, 0($2)
 entry:
-  %0 = load i32, i32* @x, align 4
+  %0 = load i32, ptr @x, align 4
   %cmp2 = icmp eq i32 %0, 0
-  %1 = load i32, i32* @a, align 4
-  %2 = load i32, i32* @b, align 4
+  %1 = load i32, ptr @a, align 4
+  %2 = load i32, ptr @b, align 4
   %cond = select i1 %cmp2, i32 %1, i32 %2
-  store i32 %cond, i32* @x, align 4
+  store i32 %cond, ptr @x, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/thread-pointer.ll b/llvm/test/CodeGen/Mips/thread-pointer.ll
index 60bee3d030319..b460d6dd0a18b 100644
--- a/llvm/test/CodeGen/Mips/thread-pointer.ll
+++ b/llvm/test/CodeGen/Mips/thread-pointer.ll
@@ -3,10 +3,10 @@
 ; RUN: llc -march=mipsel < %s | FileCheck %s
 ; RUN: llc -march=mips64el < %s | FileCheck %s
 
-declare i8* @llvm.thread.pointer() nounwind readnone
+declare ptr @llvm.thread.pointer() nounwind readnone
 
-define i8* @thread_pointer() {
+define ptr @thread_pointer() {
 ; CHECK: rdhwr $3, $29
-  %1 = tail call i8* @llvm.thread.pointer()
-  ret i8* %1
+  %1 = tail call ptr @llvm.thread.pointer()
+  ret ptr %1
 }

diff  --git a/llvm/test/CodeGen/Mips/tls-alias.ll b/llvm/test/CodeGen/Mips/tls-alias.ll
index 5de23103c9978..1842830880778 100644
--- a/llvm/test/CodeGen/Mips/tls-alias.ll
+++ b/llvm/test/CodeGen/Mips/tls-alias.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -march=mipsel -relocation-model=pic -disable-mips-delay-filler < %s | FileCheck %s
 
 @foo = thread_local global i32 42
-@bar = hidden thread_local alias i32, i32* @foo
+@bar = hidden thread_local alias i32, ptr @foo
 
-define i32* @zed() {
+define ptr @zed() {
 ; CHECK-DAG: __tls_get_addr
 ; CHECK-DAG: %tlsldm(bar)
-       ret i32* @bar
+       ret ptr @bar
 }

diff  --git a/llvm/test/CodeGen/Mips/tls-models.ll b/llvm/test/CodeGen/Mips/tls-models.ll
index 31c4438dda3f9..ccaac3db7eef0 100644
--- a/llvm/test/CodeGen/Mips/tls-models.ll
+++ b/llvm/test/CodeGen/Mips/tls-models.ll
@@ -15,9 +15,9 @@
 
 ; ----- no model specified -----
 
-define i32* @f1() {
+define ptr @f1() {
 entry:
-  ret i32* @external_gd
+  ret ptr @external_gd
 
   ; Non-PIC code can use initial-exec, PIC code has to use general dynamic.
   ; CHECK-NONPIC-LABEL:   f1:
@@ -26,9 +26,9 @@ entry:
   ; CHECK-PIC:      %tlsgd
 }
 
-define i32* @f2() {
+define ptr @f2() {
 entry:
-  ret i32* @internal_gd
+  ret ptr @internal_gd
 
   ; Non-PIC code can use local exec, PIC code can use local dynamic.
   ; CHECK-NONPIC-LABEL:   f2:
@@ -40,9 +40,9 @@ entry:
 
 ; ----- localdynamic specified -----
 
-define i32* @f3() {
+define ptr @f3() {
 entry:
-  ret i32* @external_ld
+  ret ptr @external_ld
 
   ; Non-PIC code can use initial exec, PIC should use local dynamic.
   ; CHECK-NONPIC-LABEL:   f3:
@@ -51,9 +51,9 @@ entry:
   ; CHECK-PIC:      %tlsldm
 }
 
-define i32* @f4() {
+define ptr @f4() {
 entry:
-  ret i32* @internal_ld
+  ret ptr @internal_ld
 
   ; Non-PIC code can use local exec, PIC code can use local dynamic.
   ; CHECK-NONPIC-LABEL:   f4:
@@ -65,9 +65,9 @@ entry:
 
 ; ----- initialexec specified -----
 
-define i32* @f5() {
+define ptr @f5() {
 entry:
-  ret i32* @external_ie
+  ret ptr @external_ie
 
   ; Non-PIC and PIC code will use initial exec as specified.
   ; CHECK-NONPIC-LABEL:   f5:
@@ -76,9 +76,9 @@ entry:
   ; CHECK-PIC:      %gottprel
 }
 
-define i32* @f6() {
+define ptr @f6() {
 entry:
-  ret i32* @internal_ie
+  ret ptr @internal_ie
 
   ; Non-PIC code can use local exec, PIC code use initial exec as specified.
   ; CHECK-NONPIC-LABEL:   f6:
@@ -90,9 +90,9 @@ entry:
 
 ; ----- localexec specified -----
 
-define i32* @f7() {
+define ptr @f7() {
 entry:
-  ret i32* @external_le
+  ret ptr @external_le
 
   ; Non-PIC and PIC code will use local exec as specified.
   ; CHECK-NONPIC-LABEL:   f7:
@@ -101,9 +101,9 @@ entry:
   ; CHECK-PIC:      %tprel_hi
 }
 
-define i32* @f8() {
+define ptr @f8() {
 entry:
-  ret i32* @internal_le
+  ret ptr @internal_le
 
   ; Non-PIC and PIC code will use local exec as specified.
   ; CHECK-NONPIC-LABEL:   f8:

diff  --git a/llvm/test/CodeGen/Mips/tls-static.ll b/llvm/test/CodeGen/Mips/tls-static.ll
index 584487c4c95f0..71517a64f5bac 100644
--- a/llvm/test/CodeGen/Mips/tls-static.ll
+++ b/llvm/test/CodeGen/Mips/tls-static.ll
@@ -12,7 +12,7 @@
 
 define dso_local i32 @f1() nounwind {
 entry:
-  %tmp = load i32, i32* @t1, align 4
+  %tmp = load i32, ptr @t1, align 4
   ret i32 %tmp
 
 ; STATIC32-LABEL:   f1:
@@ -34,7 +34,7 @@ entry:
 
 define dso_local i32 @f2() nounwind {
 entry:
-  %tmp = load i32, i32* @t2, align 4
+  %tmp = load i32, ptr @t2, align 4
   ret i32 %tmp
 
 ; STATICGP32-LABEL: f2:
@@ -75,8 +75,8 @@ entry:
 ; MM:   addu16  $[[R1:[0-9]+]], $[[R0]], $2
 ; MM:   lw      ${{[0-9]+}}, %dtprel_lo(f3.i)($[[R1]])
 
-  %0 = load i32, i32* @f3.i, align 4
+  %0 = load i32, ptr @f3.i, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* @f3.i, align 4
+  store i32 %inc, ptr @f3.i, align 4
   ret i32 %inc
 }

diff  --git a/llvm/test/CodeGen/Mips/tls.ll b/llvm/test/CodeGen/Mips/tls.ll
index 4ef885e8fb06a..3ed6f6bd79ca3 100644
--- a/llvm/test/CodeGen/Mips/tls.ll
+++ b/llvm/test/CodeGen/Mips/tls.ll
@@ -10,7 +10,7 @@
 
 define dso_preemptable i32 @f1() nounwind {
 entry:
-  %tmp = load i32, i32* @t1, align 4
+  %tmp = load i32, ptr @t1, align 4
   ret i32 %tmp
 
 ; PIC32-LABEL:       f1:
@@ -40,7 +40,7 @@ entry:
 
 define dso_preemptable i32 @f2() nounwind {
 entry:
-  %tmp = load i32, i32* @t2, align 4
+  %tmp = load i32, ptr @t2, align 4
   ret i32 %tmp
 
 ; PIC32-LABEL:       f2:
@@ -100,8 +100,8 @@ entry:
 ; MM:   addu16  $[[R1:[0-9]+]], $[[R0]], $2
 ; MM:   lw      ${{[0-9]+}}, %dtprel_lo(f3.i)($[[R1]])
 
-  %0 = load i32, i32* @f3.i, align 4
+  %0 = load i32, ptr @f3.i, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* @f3.i, align 4
+  store i32 %inc, ptr @f3.i, align 4
   ret i32 %inc
 }

diff  --git a/llvm/test/CodeGen/Mips/tls16.ll b/llvm/test/CodeGen/Mips/tls16.ll
index 8ab7416ff3d3e..a7c881b699330 100644
--- a/llvm/test/CodeGen/Mips/tls16.ll
+++ b/llvm/test/CodeGen/Mips/tls16.ll
@@ -4,7 +4,7 @@
 
 define i32 @foo() nounwind readonly {
 entry:
-  %0 = load i32, i32* @a, align 4
+  %0 = load i32, ptr @a, align 4
 ; PIC16:	lw	${{[0-9]+}}, %call16(__tls_get_addr)(${{[0-9]+}})
 ; PIC16:	addiu	${{[0-9]+}}, %tlsgd(a)
   ret i32 %0

diff  --git a/llvm/test/CodeGen/Mips/tls16_2.ll b/llvm/test/CodeGen/Mips/tls16_2.ll
index 1fdcb655c2bae..2d71924dbe78b 100644
--- a/llvm/test/CodeGen/Mips/tls16_2.ll
+++ b/llvm/test/CodeGen/Mips/tls16_2.ll
@@ -2,14 +2,14 @@
 
 @f.i = internal thread_local unnamed_addr global i32 1, align 4
 
-define i8* @f(i8* nocapture %a) nounwind {
+define ptr @f(ptr nocapture %a) nounwind {
 entry:
-  %0 = load i32, i32* @f.i, align 4
+  %0 = load i32, ptr @f.i, align 4
   %inc = add nsw i32 %0, 1
-  store i32 %inc, i32* @f.i, align 4
-  %1 = inttoptr i32 %inc to i8*
+  store i32 %inc, ptr @f.i, align 4
+  %1 = inttoptr i32 %inc to ptr
 ; PIC16: addiu	${{[0-9]+}}, %tlsldm(f.i)
-  ret i8* %1
+  ret ptr %1
 }
 
 

diff  --git a/llvm/test/CodeGen/Mips/uitofp.ll b/llvm/test/CodeGen/Mips/uitofp.ll
index 63b0c7bdb29c4..08cd692deb3ca 100644
--- a/llvm/test/CodeGen/Mips/uitofp.ll
+++ b/llvm/test/CodeGen/Mips/uitofp.ll
@@ -24,9 +24,9 @@ define void @f0() nounwind {
 entry:
   %b = alloca i32, align 4
   %a = alloca float, align 4
-  store volatile i32 1, i32* %b, align 4
-  %0 = load volatile i32, i32* %b, align 4
+  store volatile i32 1, ptr %b, align 4
+  %0 = load volatile i32, ptr %b, align 4
   %conv = uitofp i32 %0 to float
-  store float %conv, float* %a, align 4
+  store float %conv, ptr %a, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/ul1.ll b/llvm/test/CodeGen/Mips/ul1.ll
index eb5187a8533a2..5e3fc20dd39f8 100644
--- a/llvm/test/CodeGen/Mips/ul1.ll
+++ b/llvm/test/CodeGen/Mips/ul1.ll
@@ -5,7 +5,7 @@
 
 define i32 @main() nounwind {
 entry:
-  store i32 10, i32* getelementptr inbounds (%struct.ua, %struct.ua* @foo, i32 0, i32 1), align 1
+  store i32 10, ptr getelementptr inbounds (%struct.ua, ptr @foo, i32 0, i32 1), align 1
 ; 16:   sb  ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}})
 ; 16:   sb  ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}})
 ; 16:   sb  ${{[0-9]+}}, {{[0-9]+}}(${{[0-9]+}})

diff  --git a/llvm/test/CodeGen/Mips/unaligned-memops.ll b/llvm/test/CodeGen/Mips/unaligned-memops.ll
index 19fdbd7bb7d69..58475a216d6cc 100644
--- a/llvm/test/CodeGen/Mips/unaligned-memops.ll
+++ b/llvm/test/CodeGen/Mips/unaligned-memops.ll
@@ -5,7 +5,7 @@
 ; Test that the correct ISA version of the unaligned memory operations is
 ; selected up front.
 
-define void @g2(i32* %a, i32* %b) {
+define void @g2(ptr %a, ptr %b) {
   ; MIPS-LABEL: name: g2
   ; MIPS: bb.0.entry:
   ; MIPS:   liveins: $a0, $a1
@@ -29,7 +29,7 @@ define void @g2(i32* %a, i32* %b) {
   ; MICROMIPS:   SWR_MM [[LWR_MM]], [[COPY]], 3 :: (store (s32) into %ir.b, align 1)
   ; MICROMIPS:   RetRA
 entry:
-  %0 = load i32, i32* %a, align 1
-  store i32 %0, i32* %b, align 1
+  %0 = load i32, ptr %a, align 1
+  store i32 %0, ptr %b, align 1
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/unalignedload.ll b/llvm/test/CodeGen/Mips/unalignedload.ll
index aad585fcf6eeb..da57b92e8f6df 100644
--- a/llvm/test/CodeGen/Mips/unalignedload.ll
+++ b/llvm/test/CodeGen/Mips/unalignedload.ll
@@ -90,7 +90,7 @@ define void @bar1() nounwind {
 ; MIPS32R6-EB-NEXT:    jr $ra
 ; MIPS32R6-EB-NEXT:    addiu $sp, $sp, 24
 entry:
-  tail call void @foo2(%struct.S1* byval(%struct.S1) getelementptr inbounds (%struct.S2, %struct.S2* @s2, i32 0, i32 1)) nounwind
+  tail call void @foo2(ptr byval(%struct.S1) getelementptr inbounds (%struct.S2, ptr @s2, i32 0, i32 1)) nounwind
   ret void
 }
 
@@ -190,9 +190,9 @@ define void @bar2() nounwind {
 ; MIPS32R6-EB-NEXT:    jr $ra
 ; MIPS32R6-EB-NEXT:    addiu $sp, $sp, 24
 entry:
-  tail call void @foo4(%struct.S4* byval(%struct.S4) @s4) nounwind
+  tail call void @foo4(ptr byval(%struct.S4) @s4) nounwind
   ret void
 }
 
-declare void @foo2(%struct.S1* byval(%struct.S1))
-declare void @foo4(%struct.S4* byval(%struct.S4))
+declare void @foo2(ptr byval(%struct.S1))
+declare void @foo4(ptr byval(%struct.S4))

diff  --git a/llvm/test/CodeGen/Mips/unsized-global.ll b/llvm/test/CodeGen/Mips/unsized-global.ll
index a89ecc1fd1cb9..e009512a45296 100644
--- a/llvm/test/CodeGen/Mips/unsized-global.ll
+++ b/llvm/test/CodeGen/Mips/unsized-global.ll
@@ -7,7 +7,7 @@
 @b = external global %struct.a, align 1
 
 ; Function Attrs: norecurse nounwind readnone
-define %struct.a* @d() {
+define ptr @d() {
 ; CHECK-LABEL: d:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lui $1, %highest(b)
@@ -18,5 +18,5 @@ define %struct.a* @d() {
 ; CHECK-NEXT:    jr $ra
 ; CHECK-NEXT:    daddiu $2, $1, %lo(b)
 entry:
-  ret %struct.a* @b
+  ret ptr @b
 }

diff  --git a/llvm/test/CodeGen/Mips/v2i16tof32.ll b/llvm/test/CodeGen/Mips/v2i16tof32.ll
index ab9fa9eefd413..72d3962921c91 100644
--- a/llvm/test/CodeGen/Mips/v2i16tof32.ll
+++ b/llvm/test/CodeGen/Mips/v2i16tof32.ll
@@ -4,7 +4,7 @@
 ; Function below generates a v2i16 to f32 bitcast.
 ; Test that we are able to match it.
 
-define float @f(<8 x i16>* %a) {
+define float @f(ptr %a) {
 ; CHECK-LABEL: f:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    addiu $sp, $sp, -32
@@ -33,8 +33,8 @@ define float @f(<8 x i16>* %a) {
 ; CHECK-NEXT:    addiu $sp, $sp, 32
 entry:
   %m = alloca <8 x i16>
-  %0 = load <8 x i16>, <8 x i16>* %a
-  store <8 x i16> %0, <8 x i16>* %m
+  %0 = load <8 x i16>, ptr %a
+  store <8 x i16> %0, ptr %m
   %1 = bitcast <8 x i16> %0 to <4 x float>
   %2 = shufflevector <4 x float> %1, <4 x float> undef, <8 x i32> <i32 0, i32 3, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
   %3 = shufflevector <8 x float> zeroinitializer, <8 x float> %2, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>

diff  --git a/llvm/test/CodeGen/Mips/vector-load-store.ll b/llvm/test/CodeGen/Mips/vector-load-store.ll
index 61cbc5a6dee1f..142f383b72c7a 100644
--- a/llvm/test/CodeGen/Mips/vector-load-store.ll
+++ b/llvm/test/CodeGen/Mips/vector-load-store.ll
@@ -10,8 +10,8 @@ entry:
 ; CHECK: lw
 ; CHECK: sw
 
-  %0 = load <2 x i16>, <2 x i16>* @g1, align 4
-  store <2 x i16> %0, <2 x i16>* @g0, align 4
+  %0 = load <2 x i16>, ptr @g1, align 4
+  store <2 x i16> %0, ptr @g0, align 4
   ret void
 }
 
@@ -20,8 +20,8 @@ entry:
 ; CHECK: lw
 ; CHECK: sw
 
-  %0 = load <4 x i8>, <4 x i8>* @g3, align 4
-  store <4 x i8> %0, <4 x i8>* @g2, align 4
+  %0 = load <4 x i8>, ptr @g3, align 4
+  store <4 x i8> %0, ptr @g2, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/vector-setcc.ll b/llvm/test/CodeGen/Mips/vector-setcc.ll
index 64b84e40513eb..f8e6d5e675202 100644
--- a/llvm/test/CodeGen/Mips/vector-setcc.ll
+++ b/llvm/test/CodeGen/Mips/vector-setcc.ll
@@ -6,11 +6,11 @@
 
 define void @foo0() nounwind {
 entry:
-  %0 = load <4 x i32>, <4 x i32>* @a, align 16
-  %1 = load <4 x i32>, <4 x i32>* @b, align 16
+  %0 = load <4 x i32>, ptr @a, align 16
+  %1 = load <4 x i32>, ptr @b, align 16
   %cmp = icmp slt <4 x i32> %0, %1
   %sext = sext <4 x i1> %cmp to <4 x i32>
-  store <4 x i32> %sext, <4 x i32>* @g0, align 16
+  store <4 x i32> %sext, ptr @g0, align 16
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/weak.ll b/llvm/test/CodeGen/Mips/weak.ll
index 09dd2a4ebbf71..fdc0a1d885413 100644
--- a/llvm/test/CodeGen/Mips/weak.ll
+++ b/llvm/test/CodeGen/Mips/weak.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -march=mips < %s | FileCheck %s
 
-@t = common global i32 (...)* null, align 4
+@t = common global ptr null, align 4
 
 define void @f() nounwind {
 entry:
-  store i32 (...)* @test_weak, i32 (...)** @t, align 4
+  store ptr @test_weak, ptr @t, align 4
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/Mips/whitespace.ll b/llvm/test/CodeGen/Mips/whitespace.ll
index adb97e1657ff6..6710b73acbf1c 100644
--- a/llvm/test/CodeGen/Mips/whitespace.ll
+++ b/llvm/test/CodeGen/Mips/whitespace.ll
@@ -2,7 +2,7 @@
 ; RUN: llc  -march=mipsel -mattr=mips16 -relocation-model=pic < %s | FileCheck -strict-whitespace %s -check-prefix=16
 ; RUN: llc  -march=mips -mcpu=mips32r2 < %s | FileCheck %s -strict-whitespace -check-prefix=32R2
 
-@main.L = internal unnamed_addr constant [5 x i8*] [i8* blockaddress(@main, %L1), i8* blockaddress(@main, %L2), i8* blockaddress(@main, %L3), i8* blockaddress(@main, %L4), i8* null], align 4
+@main.L = internal unnamed_addr constant [5 x ptr] [ptr blockaddress(@main, %L1), ptr blockaddress(@main, %L2), ptr blockaddress(@main, %L3), ptr blockaddress(@main, %L4), ptr null], align 4
 @str = private unnamed_addr constant [2 x i8] c"A\00"
 @str5 = private unnamed_addr constant [2 x i8] c"B\00"
 @str6 = private unnamed_addr constant [2 x i8] c"C\00"
@@ -14,32 +14,32 @@ entry:
 ; 16: jalrc	${{[0-9]+}}
 ; 16: jrc	${{[0-9]+}}
 ; 16: jrc	$ra
-  %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str, i32 0, i32 0))
+  %puts = tail call i32 @puts(ptr @str)
   br label %L1
 
 L1:                                               ; preds = %entry, %L3
   %i.0 = phi i32 [ 0, %entry ], [ %inc, %L3 ]
-  %puts5 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str5, i32 0, i32 0))
+  %puts5 = tail call i32 @puts(ptr @str5)
   br label %L2
 
 L2:                                               ; preds = %L1, %L3
   %i.1 = phi i32 [ %i.0, %L1 ], [ %inc, %L3 ]
-  %puts6 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str6, i32 0, i32 0))
+  %puts6 = tail call i32 @puts(ptr @str6)
   br label %L3
 
 L3:                                               ; preds = %L2, %L3
   %i.2 = phi i32 [ %i.1, %L2 ], [ %inc, %L3 ]
-  %puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str7, i32 0, i32 0))
+  %puts7 = tail call i32 @puts(ptr @str7)
   %inc = add i32 %i.2, 1
-  %arrayidx = getelementptr inbounds [5 x i8*], [5 x i8*]* @main.L, i32 0, i32 %i.2
-  %0 = load i8*, i8** %arrayidx, align 4
-  indirectbr i8* %0, [label %L1, label %L2, label %L3, label %L4]
+  %arrayidx = getelementptr inbounds [5 x ptr], ptr @main.L, i32 0, i32 %i.2
+  %0 = load ptr, ptr %arrayidx, align 4
+  indirectbr ptr %0, [label %L1, label %L2, label %L3, label %L4]
 L4:                                               ; preds = %L3
-  %puts8 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @str8, i32 0, i32 0))
+  %puts8 = tail call i32 @puts(ptr @str8)
   ret i32 0
 }
 
-declare i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(ptr nocapture) nounwind
 
 define i32 @ext(i32 %s, i32 %pos, i32 %sz) nounwind readnone {
 entry:
@@ -49,14 +49,14 @@ entry:
   ret i32 %and
 }
 
-define void @ins(i32 %s, i32* nocapture %d) nounwind {
+define void @ins(i32 %s, ptr nocapture %d) nounwind {
 entry:
 ; 32R2: ins	${{[0-9]+}}, $4, 5, 9
   %and = shl i32 %s, 5
   %shl = and i32 %and, 16352
-  %tmp3 = load i32, i32* %d, align 4
+  %tmp3 = load i32, ptr %d, align 4
   %and5 = and i32 %tmp3, -16353
   %or = or i32 %and5, %shl
-  store i32 %or, i32* %d, align 4
+  store i32 %or, ptr %d, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/Mips/xor1.ll b/llvm/test/CodeGen/Mips/xor1.ll
index b203271a042b5..dcdfbf538ffd5 100644
--- a/llvm/test/CodeGen/Mips/xor1.ll
+++ b/llvm/test/CodeGen/Mips/xor1.ll
@@ -6,12 +6,12 @@
 
 define i32 @main() nounwind {
 entry:
-  %0 = load i32, i32* @x, align 4
-  %1 = load i32, i32* @y, align 4
+  %0 = load i32, ptr @x, align 4
+  %1 = load i32, ptr @y, align 4
   %xor = xor i32 %0, %1
 ; 16:	xor	${{[0-9]+}}, ${{[0-9]+}}
-  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i32 0, i32 0), i32 %xor)
+  %call = call i32 (ptr, ...) @printf(ptr @.str, i32 %xor)
   ret i32 0
 }
 
-declare i32 @printf(i8*, ...)
+declare i32 @printf(ptr, ...)

diff  --git a/llvm/test/CodeGen/Mips/zeroreg.ll b/llvm/test/CodeGen/Mips/zeroreg.ll
index c024d04a39b0b..f2abdb61503c5 100644
--- a/llvm/test/CodeGen/Mips/zeroreg.ll
+++ b/llvm/test/CodeGen/Mips/zeroreg.ll
@@ -25,7 +25,7 @@ entry:
 ; 64R6:          seleqz $2, $[[R0]], $4
 
   %tobool = icmp ne i32 %s, 0
-  %0 = load i32, i32* @g1, align 4
+  %0 = load i32, ptr @g1, align 4
   %cond = select i1 %tobool, i32 0, i32 %0
   ret i32 %cond
 }
@@ -47,7 +47,7 @@ entry:
 ; 64R6:          selnez $2, $[[R0]], $4
 
   %tobool = icmp ne i32 %s, 0
-  %0 = load i32, i32* @g1, align 4
+  %0 = load i32, ptr @g1, align 4
   %cond = select i1 %tobool, i32 %0, i32 0
   ret i32 %cond
 }
@@ -76,7 +76,7 @@ entry:
 ; 64R6:          seleqz $2, $[[R0]], $4
 
   %tobool = icmp ne i64 %s, 0
-  %0 = load i64, i64* @g2, align 4
+  %0 = load i64, ptr @g2, align 4
   %cond = select i1 %tobool, i64 0, i64 %0
   ret i64 %cond
 }
@@ -103,7 +103,7 @@ entry:
 ; 64R6:          selnez $2, $[[R0]], $4
 
   %tobool = icmp ne i64 %s, 0
-  %0 = load i64, i64* @g2, align 4
+  %0 = load i64, ptr @g2, align 4
   %cond = select i1 %tobool, i64 %0, i64 0
   ret i64 %cond
 }


        

