[llvm] 262c2c0 - AMDGPU: Update some tests to use opaque pointers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 19 06:29:05 PST 2022


Author: Matt Arsenault
Date: 2022-12-19T09:28:58-05:00
New Revision: 262c2c0fd2d148ec36798f42009f6aad4c9123a6

URL: https://github.com/llvm/llvm-project/commit/262c2c0fd2d148ec36798f42009f6aad4c9123a6
DIFF: https://github.com/llvm/llvm-project/commit/262c2c0fd2d148ec36798f42009f6aad4c9123a6.diff

LOG: AMDGPU: Update some tests to use opaque pointers

vectorize-buffer-fat-pointer.ll and vector-alloca-addrspacecast.ll
each required a manual fix to a check line.
partial-regcopy-and-spill-missed-at-regalloc.ll required re-running
update_mir_test_checks. The HSA metadata tests required keeping the
script from touching the type name in the metadata.

annotate-noclobber.ll hit one update-script bug: it deleted a check
line containing a zero-offset GEP, which logically moved the
following -NEXT check up one line.
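
The bulk of the diff is the mechanical typed-pointer to opaque-pointer
rewrite; a representative before/after pair (taken from
abi-attribute-hints-undefined-behavior.ll below) looks like:

  -  store volatile i32 %id.x, i32 addrspace(1)* %ptr
  +  store volatile i32 %id.x, ptr addrspace(1) %ptr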

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll
    llvm/test/CodeGen/AMDGPU/amdpal.ll
    llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
    llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
    llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
    llvm/test/CodeGen/AMDGPU/annotate-noclobber.ll
    llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
    llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
    llvm/test/CodeGen/AMDGPU/cse-phi-incoming-val.ll
    llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
    llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg-v3.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ctor-dtor-list.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full-v3.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-heap-v5.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v3.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-queue-ptr-v5.ll
    llvm/test/CodeGen/AMDGPU/hsa-metadata-queueptr-v5.ll
    llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt-opaque-ptr.ll
    llvm/test/CodeGen/AMDGPU/indirect-addressing-term.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
    llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
    llvm/test/CodeGen/AMDGPU/load-global-f32.ll
    llvm/test/CodeGen/AMDGPU/load-global-i32.ll
    llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
    llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
    llvm/test/CodeGen/AMDGPU/promote-alloca-addrspacecast.ll
    llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
    llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
    llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
    llvm/test/CodeGen/AMDGPU/spill-csr-frame-ptr-reg-copy.ll
    llvm/test/CodeGen/AMDGPU/trunc-store-i64.ll
    llvm/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll
    llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
    llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
    llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
    llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
    llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
    llvm/test/CodeGen/AMDGPU/vector-alloca-addrspacecast.ll
    llvm/test/CodeGen/AMDGPU/vectorize-buffer-fat-pointer.ll
    llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll b/llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll
index 2e48b0ba5945..f234847900e7 100644
--- a/llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll
+++ b/llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll
@@ -96,7 +96,7 @@ define amdgpu_kernel void @parent_kernel_missing_inputs() #0 {
 }
 
 ; Function is marked with amdgpu-no-workitem-id-* but uses them anyway
-define void @marked_func_use_workitem_id(i32 addrspace(1)* %ptr) #0 {
+define void @marked_func_use_workitem_id(ptr addrspace(1) %ptr) #0 {
 ; FIXEDABI-SDAG-LABEL: marked_func_use_workitem_id:
 ; FIXEDABI-SDAG:       ; %bb.0:
 ; FIXEDABI-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -127,14 +127,14 @@ define void @marked_func_use_workitem_id(i32 addrspace(1)* %ptr) #0 {
   %id.x = call i32 @llvm.amdgcn.workitem.id.x()
   %id.y = call i32 @llvm.amdgcn.workitem.id.y()
   %id.z = call i32 @llvm.amdgcn.workitem.id.z()
-  store volatile i32 %id.x, i32 addrspace(1)* %ptr
-  store volatile i32 %id.y, i32 addrspace(1)* %ptr
-  store volatile i32 %id.z, i32 addrspace(1)* %ptr
+  store volatile i32 %id.x, ptr addrspace(1) %ptr
+  store volatile i32 %id.y, ptr addrspace(1) %ptr
+  store volatile i32 %id.z, ptr addrspace(1) %ptr
   ret void
 }
 
 ; Function is marked with amdgpu-no-workitem-id-* but uses them anyway
-define amdgpu_kernel void @marked_kernel_use_workitem_id(i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @marked_kernel_use_workitem_id(ptr addrspace(1) %ptr) #0 {
 ; FIXEDABI-LABEL: marked_kernel_use_workitem_id:
 ; FIXEDABI:       ; %bb.0:
 ; FIXEDABI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -151,13 +151,13 @@ define amdgpu_kernel void @marked_kernel_use_workitem_id(i32 addrspace(1)* %ptr)
   %id.x = call i32 @llvm.amdgcn.workitem.id.x()
   %id.y = call i32 @llvm.amdgcn.workitem.id.y()
   %id.z = call i32 @llvm.amdgcn.workitem.id.z()
-  store volatile i32 %id.x, i32 addrspace(1)* %ptr
-  store volatile i32 %id.y, i32 addrspace(1)* %ptr
-  store volatile i32 %id.z, i32 addrspace(1)* %ptr
+  store volatile i32 %id.x, ptr addrspace(1) %ptr
+  store volatile i32 %id.y, ptr addrspace(1) %ptr
+  store volatile i32 %id.z, ptr addrspace(1) %ptr
   ret void
 }
 
-define void @marked_func_use_workgroup_id(i32 addrspace(1)* %ptr) #0 {
+define void @marked_func_use_workgroup_id(ptr addrspace(1) %ptr) #0 {
 ; FIXEDABI-LABEL: marked_func_use_workgroup_id:
 ; FIXEDABI:       ; %bb.0:
 ; FIXEDABI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -174,13 +174,13 @@ define void @marked_func_use_workgroup_id(i32 addrspace(1)* %ptr) #0 {
   %id.x = call i32 @llvm.amdgcn.workgroup.id.x()
   %id.y = call i32 @llvm.amdgcn.workgroup.id.y()
   %id.z = call i32 @llvm.amdgcn.workgroup.id.z()
-  store volatile i32 %id.x, i32 addrspace(1)* %ptr
-  store volatile i32 %id.y, i32 addrspace(1)* %ptr
-  store volatile i32 %id.z, i32 addrspace(1)* %ptr
+  store volatile i32 %id.x, ptr addrspace(1) %ptr
+  store volatile i32 %id.y, ptr addrspace(1) %ptr
+  store volatile i32 %id.z, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @marked_kernel_use_workgroup_id(i32 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @marked_kernel_use_workgroup_id(ptr addrspace(1) %ptr) #0 {
 ; FIXEDABI-LABEL: marked_kernel_use_workgroup_id:
 ; FIXEDABI:       ; %bb.0:
 ; FIXEDABI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
@@ -200,13 +200,13 @@ define amdgpu_kernel void @marked_kernel_use_workgroup_id(i32 addrspace(1)* %ptr
   %id.x = call i32 @llvm.amdgcn.workgroup.id.x()
   %id.y = call i32 @llvm.amdgcn.workgroup.id.y()
   %id.z = call i32 @llvm.amdgcn.workgroup.id.z()
-  store volatile i32 %id.x, i32 addrspace(1)* %ptr
-  store volatile i32 %id.y, i32 addrspace(1)* %ptr
-  store volatile i32 %id.z, i32 addrspace(1)* %ptr
+  store volatile i32 %id.x, ptr addrspace(1) %ptr
+  store volatile i32 %id.y, ptr addrspace(1) %ptr
+  store volatile i32 %id.z, ptr addrspace(1) %ptr
   ret void
 }
 
-define void @marked_func_use_other_sgpr(i64 addrspace(1)* %ptr) #0 {
+define void @marked_func_use_other_sgpr(ptr addrspace(1) %ptr) #0 {
 ; FIXEDABI-LABEL: marked_func_use_other_sgpr:
 ; FIXEDABI:       ; %bb.0:
 ; FIXEDABI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -227,18 +227,18 @@ define void @marked_func_use_other_sgpr(i64 addrspace(1)* %ptr) #0 {
 ; FIXEDABI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; FIXEDABI-NEXT:    s_waitcnt vmcnt(0)
 ; FIXEDABI-NEXT:    s_setpc_b64 s[30:31]
-  %queue.ptr = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-  %implicitarg.ptr = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+  %queue.ptr = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %dispatch.ptr = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
   %dispatch.id = call i64 @llvm.amdgcn.dispatch.id()
-  %queue.load = load volatile i8, i8 addrspace(4)* %queue.ptr
-  %implicitarg.load = load volatile i8, i8 addrspace(4)* %implicitarg.ptr
-  %dispatch.load = load volatile i8, i8 addrspace(4)* %dispatch.ptr
-  store volatile i64 %dispatch.id, i64 addrspace(1)* %ptr
+  %queue.load = load volatile i8, ptr addrspace(4) %queue.ptr
+  %implicitarg.load = load volatile i8, ptr addrspace(4) %implicitarg.ptr
+  %dispatch.load = load volatile i8, ptr addrspace(4) %dispatch.ptr
+  store volatile i64 %dispatch.id, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @marked_kernel_use_other_sgpr(i64 addrspace(1)* %ptr) #0 {
+define amdgpu_kernel void @marked_kernel_use_other_sgpr(ptr addrspace(1) %ptr) #0 {
 ; FIXEDABI-LABEL: marked_kernel_use_other_sgpr:
 ; FIXEDABI:       ; %bb.0:
 ; FIXEDABI-NEXT:    s_add_u32 s0, s4, 8
@@ -251,14 +251,14 @@ define amdgpu_kernel void @marked_kernel_use_other_sgpr(i64 addrspace(1)* %ptr)
 ; FIXEDABI-NEXT:    s_waitcnt vmcnt(0)
 ; FIXEDABI-NEXT:    flat_load_ubyte v0, v[0:1] glc
 ; FIXEDABI-NEXT:    s_endpgm
-  %queue.ptr = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-  %implicitarg.ptr = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+  %queue.ptr = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %dispatch.ptr = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
   %dispatch.id = call i64 @llvm.amdgcn.dispatch.id()
-  %queue.load = load volatile i8, i8 addrspace(4)* %queue.ptr
-  %implicitarg.load = load volatile i8, i8 addrspace(4)* %implicitarg.ptr
-  %dispatch.load = load volatile i8, i8 addrspace(4)* %dispatch.ptr
-  store volatile i64 %dispatch.id, i64 addrspace(1)* %ptr
+  %queue.load = load volatile i8, ptr addrspace(4) %queue.ptr
+  %implicitarg.load = load volatile i8, ptr addrspace(4) %implicitarg.ptr
+  %dispatch.load = load volatile i8, ptr addrspace(4) %dispatch.ptr
+  store volatile i64 %dispatch.id, ptr addrspace(1) %ptr
   ret void
 }
 
@@ -269,13 +269,13 @@ define amdgpu_kernel void @marked_kernel_nokernargs_implicitarg_ptr() #0 {
 ; FIXEDABI-NEXT:    v_mov_b32_e32 v1, 0
 ; FIXEDABI-NEXT:    flat_load_ubyte v0, v[0:1] glc
 ; FIXEDABI-NEXT:    s_endpgm
-  %implicitarg.ptr = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %implicitarg.load = load volatile i8, i8 addrspace(4)* %implicitarg.ptr
+  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %implicitarg.load = load volatile i8, ptr addrspace(4) %implicitarg.ptr
   ret void
 }
 
 ; On gfx8, the queue ptr is required for this addrspacecast.
-define void @addrspacecast_requires_queue_ptr(i32 addrspace(5)* %ptr.private, i32 addrspace(3)* %ptr.local) #0 {
+define void @addrspacecast_requires_queue_ptr(ptr addrspace(5) %ptr.private, ptr addrspace(3) %ptr.local) #0 {
 ; FIXEDABI-SDAG-LABEL: addrspacecast_requires_queue_ptr:
 ; FIXEDABI-SDAG:       ; %bb.0:
 ; FIXEDABI-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -317,14 +317,14 @@ define void @addrspacecast_requires_queue_ptr(i32 addrspace(5)* %ptr.private, i3
 ; FIXEDABI-GISEL-NEXT:    flat_store_dword v[0:1], v2
 ; FIXEDABI-GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; FIXEDABI-GISEL-NEXT:    s_setpc_b64 s[30:31]
-  %flat.private = addrspacecast i32 addrspace(5)* %ptr.private to i32*
-  %flat.local = addrspacecast i32 addrspace(3)* %ptr.local to i32*
-  store volatile i32 1, i32* %flat.private
-  store volatile i32 2, i32* %flat.local
+  %flat.private = addrspacecast ptr addrspace(5) %ptr.private to ptr
+  %flat.local = addrspacecast ptr addrspace(3) %ptr.local to ptr
+  store volatile i32 1, ptr %flat.private
+  store volatile i32 2, ptr %flat.local
   ret void
 }
 
-define void @is_shared_requires_queue_ptr(i8* %ptr) #0 {
+define void @is_shared_requires_queue_ptr(ptr %ptr) #0 {
 ; FIXEDABI-LABEL: is_shared_requires_queue_ptr:
 ; FIXEDABI:       ; %bb.0:
 ; FIXEDABI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -335,13 +335,13 @@ define void @is_shared_requires_queue_ptr(i8* %ptr) #0 {
 ; FIXEDABI-NEXT:    flat_store_dword v[0:1], v0
 ; FIXEDABI-NEXT:    s_waitcnt vmcnt(0)
 ; FIXEDABI-NEXT:    s_setpc_b64 s[30:31]
-  %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %ptr)
   %zext = zext i1 %is.shared to i32
-  store volatile i32 %zext, i32 addrspace(1)* undef
+  store volatile i32 %zext, ptr addrspace(1) undef
   ret void
 }
 
-define void @is_private_requires_queue_ptr(i8* %ptr) #0 {
+define void @is_private_requires_queue_ptr(ptr %ptr) #0 {
 ; FIXEDABI-LABEL: is_private_requires_queue_ptr:
 ; FIXEDABI:       ; %bb.0:
 ; FIXEDABI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -352,9 +352,9 @@ define void @is_private_requires_queue_ptr(i8* %ptr) #0 {
 ; FIXEDABI-NEXT:    flat_store_dword v[0:1], v0
 ; FIXEDABI-NEXT:    s_waitcnt vmcnt(0)
 ; FIXEDABI-NEXT:    s_setpc_b64 s[30:31]
-  %is.private = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %ptr)
   %zext = zext i1 %is.private to i32
-  store volatile i32 %zext, i32 addrspace(1)* undef
+  store volatile i32 %zext, ptr addrspace(1) undef
   ret void
 }
 
@@ -383,12 +383,12 @@ declare i32 @llvm.amdgcn.workitem.id.z()
 declare i32 @llvm.amdgcn.workgroup.id.x()
 declare i32 @llvm.amdgcn.workgroup.id.y()
 declare i32 @llvm.amdgcn.workgroup.id.z()
-declare noalias i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-declare noalias i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+declare noalias ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+declare noalias ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 declare i64 @llvm.amdgcn.dispatch.id()
-declare noalias i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-declare i1 @llvm.amdgcn.is.shared(i8*)
-declare i1 @llvm.amdgcn.is.private(i8*)
+declare noalias ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+declare i1 @llvm.amdgcn.is.shared(ptr)
+declare i1 @llvm.amdgcn.is.private(ptr)
 declare void @llvm.trap()
 declare void @llvm.debugtrap()
 

diff --git a/llvm/test/CodeGen/AMDGPU/amdpal.ll b/llvm/test/CodeGen/AMDGPU/amdpal.ll
index 918015f735ef..f2ba400174c7 100644
--- a/llvm/test/CodeGen/AMDGPU/amdpal.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdpal.ll
@@ -3,9 +3,9 @@
 
 ; PAL-NOT: .AMDGPU.config
 ; PAL-LABEL: {{^}}simple:
-define amdgpu_kernel void @simple(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @simple(ptr addrspace(1) %out) {
 entry:
-  store i32 0, i32 addrspace(1)* %out
+  store i32 0, ptr addrspace(1) %out
   ret void
 }
 
@@ -18,14 +18,13 @@ entry:
 ; PAL: s_load_dwordx4 s[[[SCRATCHDESC:[0-9]+]]:{{[0-9]+]}}, s[[[GITPTR]]:
 ; PAL: buffer_store{{.*}}, s[[[SCRATCHDESC]]:
 
-define amdgpu_kernel void @scratch(<2 x i32> %in, i32 %idx, i32 addrspace(5)* %out) {
+define amdgpu_kernel void @scratch(<2 x i32> %in, i32 %idx, ptr addrspace(5) %out) {
 entry:
   %v = alloca [2 x i32], addrspace(5)
-  %vv = bitcast [2 x i32] addrspace(5)* %v to <2 x i32> addrspace(5)*
-  store <2 x i32> %in, <2 x i32> addrspace(5)* %vv
-  %e = getelementptr [2 x i32], [2 x i32] addrspace(5)* %v, i32 0, i32 %idx
-  %x = load i32, i32 addrspace(5)* %e
-  store i32 %x, i32 addrspace(5)* %out
+  store <2 x i32> %in, ptr addrspace(5) %v
+  %e = getelementptr [2 x i32], ptr addrspace(5) %v, i32 0, i32 %idx
+  %x = load i32, ptr addrspace(5) %e
+  store i32 %x, ptr addrspace(5) %out
   ret void
 }
 
@@ -42,14 +41,13 @@ entry:
 ; PAL: s_load_dwordx4 s[[[SCRATCHDESC:[0-9]+]]:{{[0-9]+]}}, s[[[GITPTR]]:
 ; PAL: buffer_store{{.*}}, s[[[SCRATCHDESC]]:
 
-define amdgpu_kernel void @scratch2(<2 x i32> %in, i32 %idx, i32 addrspace(5)* %out) #0 {
+define amdgpu_kernel void @scratch2(<2 x i32> %in, i32 %idx, ptr addrspace(5) %out) #0 {
 entry:
   %v = alloca [2 x i32], addrspace(5)
-  %vv = bitcast [2 x i32] addrspace(5)* %v to <2 x i32> addrspace(5)*
-  store <2 x i32> %in, <2 x i32> addrspace(5)* %vv
-  %e = getelementptr [2 x i32], [2 x i32] addrspace(5)* %v, i32 0, i32 %idx
-  %x = load i32, i32 addrspace(5)* %e
-  store i32 %x, i32 addrspace(5)* %out
+  store <2 x i32> %in, ptr addrspace(5) %v
+  %e = getelementptr [2 x i32], ptr addrspace(5) %v, i32 0, i32 %idx
+  %x = load i32, ptr addrspace(5) %e
+  store i32 %x, ptr addrspace(5) %out
   ret void
 }
 
@@ -68,14 +66,11 @@ entry:
 define amdgpu_cs void @scratch2_cs(i32 inreg, i32 inreg, i32 inreg, <3 x i32> inreg, i32 inreg, <3 x i32> %coord, <2 x i32> %in, i32 %extra, i32 %idx) #0 {
 entry:
   %v = alloca [3 x i32], addrspace(5)
-  %v0 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %v, i32 0, i32 0
-  %v1 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %v, i32 0, i32 1
-  store i32 %extra, i32 addrspace(5)* %v0
-  %v1a = bitcast i32 addrspace(5)* %v1 to [2 x i32] addrspace(5)*
-  %vv = bitcast [2 x i32] addrspace(5)* %v1a to <2 x i32> addrspace(5)*
-  store <2 x i32> %in, <2 x i32> addrspace(5)* %vv
-  %e = getelementptr [2 x i32], [2 x i32] addrspace(5)* %v1a, i32 0, i32 %idx
-  %x = load i32, i32 addrspace(5)* %e
+  %v1 = getelementptr [3 x i32], ptr addrspace(5) %v, i32 0, i32 1
+  store i32 %extra, ptr addrspace(5) %v
+  store <2 x i32> %in, ptr addrspace(5) %v1
+  %e = getelementptr [2 x i32], ptr addrspace(5) %v1, i32 0, i32 %idx
+  %x = load i32, ptr addrspace(5) %e
   %xf = bitcast i32 %x to float
   call void @llvm.amdgcn.raw.buffer.store.f32(float %xf, <4 x i32> undef, i32 0, i32 0, i32 0)
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
index 0af30a2ef07e..401d219d7acc 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -12,27 +12,27 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0
 declare i32 @llvm.amdgcn.workitem.id.y() #0
 declare i32 @llvm.amdgcn.workitem.id.z() #0
 
-declare i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #0
-declare i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #0
-declare i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #0
-declare i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #0
+declare ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() #0
+declare ptr addrspace(4) @llvm.amdgcn.queue.ptr() #0
+declare ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr() #0
+declare ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #0
 declare i64 @llvm.amdgcn.dispatch.id() #0
 
 define void @use_workitem_id_x() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_workitem_id_x
 ; AKF_HSA-SAME: () #[[ATTR1:[0-9]+]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_workitem_id_x
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR1:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workitem.id.x()
-  store volatile i32 %val, i32 addrspace(1)* undef
+  store volatile i32 %val, ptr addrspace(1) undef
   ret void
 }
 
@@ -40,17 +40,17 @@ define void @use_workitem_id_y() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_workitem_id_y
 ; AKF_HSA-SAME: () #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_workitem_id_y
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR2:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workitem.id.y()
-  store volatile i32 %val, i32 addrspace(1)* undef
+  store volatile i32 %val, ptr addrspace(1) undef
   ret void
 }
 
@@ -58,17 +58,17 @@ define void @use_workitem_id_z() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_workitem_id_z
 ; AKF_HSA-SAME: () #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_workitem_id_z
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR3:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workitem.id.z()
-  store volatile i32 %val, i32 addrspace(1)* undef
+  store volatile i32 %val, ptr addrspace(1) undef
   ret void
 }
 
@@ -76,17 +76,17 @@ define void @use_workgroup_id_x() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_workgroup_id_x
 ; AKF_HSA-SAME: () #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_workgroup_id_x
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR4:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workgroup.id.x()
-  store volatile i32 %val, i32 addrspace(1)* undef
+  store volatile i32 %val, ptr addrspace(1) undef
   ret void
 }
 
@@ -94,17 +94,17 @@ define void @use_workgroup_id_y() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_workgroup_id_y
 ; AKF_HSA-SAME: () #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_workgroup_id_y
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR5:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workgroup.id.y()
-  store volatile i32 %val, i32 addrspace(1)* undef
+  store volatile i32 %val, ptr addrspace(1) undef
   ret void
 }
 
@@ -112,53 +112,53 @@ define void @use_workgroup_id_z() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_workgroup_id_z
 ; AKF_HSA-SAME: () #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_workgroup_id_z
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR6:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workgroup.id.z()
-  store volatile i32 %val, i32 addrspace(1)* undef
+  store volatile i32 %val, ptr addrspace(1) undef
   ret void
 }
 
 define void @use_dispatch_ptr() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_dispatch_ptr
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-; AKF_HSA-NEXT:    store volatile i8 addrspace(4)* [[DISPATCH_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; AKF_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; AKF_HSA-NEXT:    store volatile ptr addrspace(4) [[DISPATCH_PTR]], ptr addrspace(1) undef, align 8
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_dispatch_ptr
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR7:[0-9]+]] {
-; ATTRIBUTOR_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i8 addrspace(4)* [[DISPATCH_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; ATTRIBUTOR_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; ATTRIBUTOR_HSA-NEXT:    store volatile ptr addrspace(4) [[DISPATCH_PTR]], ptr addrspace(1) undef, align 8
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-  store volatile i8 addrspace(4)* %dispatch.ptr, i8 addrspace(4)* addrspace(1)* undef
+  %dispatch.ptr = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+  store volatile ptr addrspace(4) %dispatch.ptr, ptr addrspace(1) undef
   ret void
 }
 
 define void @use_queue_ptr() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_queue_ptr
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[QUEUE_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-; AKF_HSA-NEXT:    store volatile i8 addrspace(4)* [[QUEUE_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; AKF_HSA-NEXT:    [[QUEUE_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+; AKF_HSA-NEXT:    store volatile ptr addrspace(4) [[QUEUE_PTR]], ptr addrspace(1) undef, align 8
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_queue_ptr
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR8:[0-9]+]] {
-; ATTRIBUTOR_HSA-NEXT:    [[QUEUE_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i8 addrspace(4)* [[QUEUE_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; ATTRIBUTOR_HSA-NEXT:    [[QUEUE_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+; ATTRIBUTOR_HSA-NEXT:    store volatile ptr addrspace(4) [[QUEUE_PTR]], ptr addrspace(1) undef, align 8
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %queue.ptr = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-  store volatile i8 addrspace(4)* %queue.ptr, i8 addrspace(4)* addrspace(1)* undef
+  %queue.ptr = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+  store volatile ptr addrspace(4) %queue.ptr, ptr addrspace(1) undef
   ret void
 }
 
@@ -166,17 +166,17 @@ define void @use_dispatch_id() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_dispatch_id
 ; AKF_HSA-SAME: () #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i64 @llvm.amdgcn.dispatch.id()
-; AKF_HSA-NEXT:    store volatile i64 [[VAL]], i64 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store volatile i64 [[VAL]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_dispatch_id
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR9:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i64 @llvm.amdgcn.dispatch.id()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i64 [[VAL]], i64 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i64 [[VAL]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i64 @llvm.amdgcn.dispatch.id()
-  store volatile i64 %val, i64 addrspace(1)* undef
+  store volatile i64 %val, ptr addrspace(1) undef
   ret void
 }
 
@@ -185,22 +185,22 @@ define void @use_workgroup_id_y_workgroup_id_z() #1 {
 ; AKF_HSA-SAME: () #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
 ; AKF_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* undef, align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) undef, align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_workgroup_id_y_workgroup_id_z
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR10:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* undef, align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.z()
-  store volatile i32 %val0, i32 addrspace(1)* undef
-  store volatile i32 %val1, i32 addrspace(1)* undef
+  store volatile i32 %val0, ptr addrspace(1) undef
+  store volatile i32 %val1, ptr addrspace(1) undef
   ret void
 }
 
@@ -418,19 +418,19 @@ define void @recursive_use_workitem_id_y() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@recursive_use_workitem_id_y
 ; AKF_HSA-SAME: () #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    call void @recursive_use_workitem_id_y()
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@recursive_use_workitem_id_y
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR2]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    call void @recursive_use_workitem_id_y()
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workitem.id.y()
-  store volatile i32 %val, i32 addrspace(1)* undef
+  store volatile i32 %val, ptr addrspace(1) undef
   call void @recursive_use_workitem_id_y()
   ret void
 }
@@ -450,60 +450,60 @@ define void @call_recursive_use_workitem_id_y() #1 {
   ret void
 }
 
-define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #1 {
+define void @use_group_to_flat_addrspacecast(ptr addrspace(3) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_group_to_flat_addrspacecast
-; AKF_HSA-SAME: (i32 addrspace(3)* [[PTR:%.*]]) #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(3)* [[PTR]] to i32 addrspace(4)*
-; AKF_HSA-NEXT:    store volatile i32 0, i32 addrspace(4)* [[STOF]], align 4
+; AKF_HSA-SAME: (ptr addrspace(3) [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr addrspace(4)
+; AKF_HSA-NEXT:    store volatile i32 0, ptr addrspace(4) [[STOF]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_group_to_flat_addrspacecast
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(3)* [[PTR:%.*]]) #[[ATTR8]] {
-; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(3)* [[PTR]] to i32 addrspace(4)*
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, i32 addrspace(4)* [[STOF]], align 4
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(3) [[PTR:%.*]]) #[[ATTR8]] {
+; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr addrspace(4)
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, ptr addrspace(4) [[STOF]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
-  store volatile i32 0, i32 addrspace(4)* %stof
+  %stof = addrspacecast ptr addrspace(3) %ptr to ptr addrspace(4)
+  store volatile i32 0, ptr addrspace(4) %stof
   ret void
 }
 
 
-define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #2 {
+define void @use_group_to_flat_addrspacecast_gfx9(ptr addrspace(3) %ptr) #2 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_group_to_flat_addrspacecast_gfx9
-; AKF_HSA-SAME: (i32 addrspace(3)* [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
-; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(3)* [[PTR]] to i32 addrspace(4)*
-; AKF_HSA-NEXT:    store volatile i32 0, i32 addrspace(4)* [[STOF]], align 4
+; AKF_HSA-SAME: (ptr addrspace(3) [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
+; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr addrspace(4)
+; AKF_HSA-NEXT:    store volatile i32 0, ptr addrspace(4) [[STOF]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_group_to_flat_addrspacecast_gfx9
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(3)* [[PTR:%.*]]) #[[ATTR12:[0-9]+]] {
-; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(3)* [[PTR]] to i32 addrspace(4)*
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, i32 addrspace(4)* [[STOF]], align 4
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(3) [[PTR:%.*]]) #[[ATTR12:[0-9]+]] {
+; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr addrspace(4)
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, ptr addrspace(4) [[STOF]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
-  store volatile i32 0, i32 addrspace(4)* %stof
+  %stof = addrspacecast ptr addrspace(3) %ptr to ptr addrspace(4)
+  store volatile i32 0, ptr addrspace(4) %stof
   ret void
 }
 
-define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #2 {
+define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(ptr addrspace(3) %ptr) #2 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_group_to_flat_addrspacecast_queue_ptr_gfx9
-; AKF_HSA-SAME: (i32 addrspace(3)* [[PTR:%.*]]) #[[ATTR2]] {
-; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(3)* [[PTR]] to i32 addrspace(4)*
-; AKF_HSA-NEXT:    store volatile i32 0, i32 addrspace(4)* [[STOF]], align 4
+; AKF_HSA-SAME: (ptr addrspace(3) [[PTR:%.*]]) #[[ATTR2]] {
+; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr addrspace(4)
+; AKF_HSA-NEXT:    store volatile i32 0, ptr addrspace(4) [[STOF]], align 4
 ; AKF_HSA-NEXT:    call void @func_indirect_use_queue_ptr()
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_group_to_flat_addrspacecast_queue_ptr_gfx9
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(3)* [[PTR:%.*]]) #[[ATTR13:[0-9]+]] {
-; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(3)* [[PTR]] to i32 addrspace(4)*
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, i32 addrspace(4)* [[STOF]], align 4
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(3) [[PTR:%.*]]) #[[ATTR13:[0-9]+]] {
+; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr addrspace(4)
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, ptr addrspace(4) [[STOF]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    call void @func_indirect_use_queue_ptr()
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
-  store volatile i32 0, i32 addrspace(4)* %stof
+  %stof = addrspacecast ptr addrspace(3) %ptr to ptr addrspace(4)
+  store volatile i32 0, ptr addrspace(4) %stof
   call void @func_indirect_use_queue_ptr()
   ret void
 }
@@ -511,63 +511,63 @@ define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %p
 define void @indirect_use_group_to_flat_addrspacecast() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@indirect_use_group_to_flat_addrspacecast
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    call void @use_group_to_flat_addrspacecast(i32 addrspace(3)* null)
+; AKF_HSA-NEXT:    call void @use_group_to_flat_addrspacecast(ptr addrspace(3) null)
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@indirect_use_group_to_flat_addrspacecast
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR8]] {
-; ATTRIBUTOR_HSA-NEXT:    call void @use_group_to_flat_addrspacecast(i32 addrspace(3)* null)
+; ATTRIBUTOR_HSA-NEXT:    call void @use_group_to_flat_addrspacecast(ptr addrspace(3) null)
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  call void @use_group_to_flat_addrspacecast(i32 addrspace(3)* null)
+  call void @use_group_to_flat_addrspacecast(ptr addrspace(3) null)
   ret void
 }
 
 define void @indirect_use_group_to_flat_addrspacecast_gfx9() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@indirect_use_group_to_flat_addrspacecast_gfx9
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    call void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* null)
+; AKF_HSA-NEXT:    call void @use_group_to_flat_addrspacecast_gfx9(ptr addrspace(3) null)
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@indirect_use_group_to_flat_addrspacecast_gfx9
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR11]] {
-; ATTRIBUTOR_HSA-NEXT:    call void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* null)
+; ATTRIBUTOR_HSA-NEXT:    call void @use_group_to_flat_addrspacecast_gfx9(ptr addrspace(3) null)
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  call void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* null)
+  call void @use_group_to_flat_addrspacecast_gfx9(ptr addrspace(3) null)
   ret void
 }
 
 define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* null)
+; AKF_HSA-NEXT:    call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(ptr addrspace(3) null)
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR8]] {
-; ATTRIBUTOR_HSA-NEXT:    call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* null)
+; ATTRIBUTOR_HSA-NEXT:    call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(ptr addrspace(3) null)
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* null)
+  call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(ptr addrspace(3) null)
   ret void
 }
 
 define void @use_kernarg_segment_ptr() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_kernarg_segment_ptr
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[KERNARG_SEGMENT_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
-; AKF_HSA-NEXT:    store volatile i8 addrspace(4)* [[KERNARG_SEGMENT_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; AKF_HSA-NEXT:    [[KERNARG_SEGMENT_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+; AKF_HSA-NEXT:    store volatile ptr addrspace(4) [[KERNARG_SEGMENT_PTR]], ptr addrspace(1) undef, align 8
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_kernarg_segment_ptr
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR11]] {
-; ATTRIBUTOR_HSA-NEXT:    [[KERNARG_SEGMENT_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i8 addrspace(4)* [[KERNARG_SEGMENT_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; ATTRIBUTOR_HSA-NEXT:    [[KERNARG_SEGMENT_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+; ATTRIBUTOR_HSA-NEXT:    store volatile ptr addrspace(4) [[KERNARG_SEGMENT_PTR]], ptr addrspace(1) undef, align 8
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %kernarg.segment.ptr = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
-  store volatile i8 addrspace(4)* %kernarg.segment.ptr, i8 addrspace(4)* addrspace(1)* undef
+  %kernarg.segment.ptr = call ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+  store volatile ptr addrspace(4) %kernarg.segment.ptr, ptr addrspace(1) undef
   ret void
 }
 define void @func_indirect_use_kernarg_segment_ptr() #1 {
@@ -588,36 +588,36 @@ define void @func_indirect_use_kernarg_segment_ptr() #1 {
 define amdgpu_kernel void @kern_use_implicitarg_ptr() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@kern_use_implicitarg_ptr
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[IMPLICITARG_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-; AKF_HSA-NEXT:    store volatile i8 addrspace(4)* [[IMPLICITARG_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; AKF_HSA-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; AKF_HSA-NEXT:    store volatile ptr addrspace(4) [[IMPLICITARG_PTR]], ptr addrspace(1) undef, align 8
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@kern_use_implicitarg_ptr
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR14:[0-9]+]] {
-; ATTRIBUTOR_HSA-NEXT:    [[IMPLICITARG_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i8 addrspace(4)* [[IMPLICITARG_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; ATTRIBUTOR_HSA-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; ATTRIBUTOR_HSA-NEXT:    store volatile ptr addrspace(4) [[IMPLICITARG_PTR]], ptr addrspace(1) undef, align 8
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %implicitarg.ptr = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  store volatile i8 addrspace(4)* %implicitarg.ptr, i8 addrspace(4)* addrspace(1)* undef
+  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  store volatile ptr addrspace(4) %implicitarg.ptr, ptr addrspace(1) undef
   ret void
 }
 
 define void @use_implicitarg_ptr() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_implicitarg_ptr
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[IMPLICITARG_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-; AKF_HSA-NEXT:    store volatile i8 addrspace(4)* [[IMPLICITARG_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; AKF_HSA-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; AKF_HSA-NEXT:    store volatile ptr addrspace(4) [[IMPLICITARG_PTR]], ptr addrspace(1) undef, align 8
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_implicitarg_ptr
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR14]] {
-; ATTRIBUTOR_HSA-NEXT:    [[IMPLICITARG_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i8 addrspace(4)* [[IMPLICITARG_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; ATTRIBUTOR_HSA-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+; ATTRIBUTOR_HSA-NEXT:    store volatile ptr addrspace(4) [[IMPLICITARG_PTR]], ptr addrspace(1) undef, align 8
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %implicitarg.ptr = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  store volatile i8 addrspace(4)* %implicitarg.ptr, i8 addrspace(4)* addrspace(1)* undef
+  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  store volatile ptr addrspace(4) %implicitarg.ptr, ptr addrspace(1) undef
   ret void
 }
 
@@ -728,48 +728,48 @@ define amdgpu_kernel void @func_kern_defined() #3 {
 define i32 @use_dispatch_ptr_ret_type() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_dispatch_ptr_ret_type
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-; AKF_HSA-NEXT:    store volatile i8 addrspace(4)* [[DISPATCH_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; AKF_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; AKF_HSA-NEXT:    store volatile ptr addrspace(4) [[DISPATCH_PTR]], ptr addrspace(1) undef, align 8
 ; AKF_HSA-NEXT:    ret i32 0
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_dispatch_ptr_ret_type
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR7]] {
-; ATTRIBUTOR_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i8 addrspace(4)* [[DISPATCH_PTR]], i8 addrspace(4)* addrspace(1)* undef, align 8
+; ATTRIBUTOR_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; ATTRIBUTOR_HSA-NEXT:    store volatile ptr addrspace(4) [[DISPATCH_PTR]], ptr addrspace(1) undef, align 8
 ; ATTRIBUTOR_HSA-NEXT:    ret i32 0
 ;
-  %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-  store volatile i8 addrspace(4)* %dispatch.ptr, i8 addrspace(4)* addrspace(1)* undef
+  %dispatch.ptr = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+  store volatile ptr addrspace(4) %dispatch.ptr, ptr addrspace(1) undef
   ret i32 0
 }
 
 define float @func_indirect_use_dispatch_ptr_constexpr_cast_func() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@func_indirect_use_dispatch_ptr_constexpr_cast_func
 ; AKF_HSA-SAME: () #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[F:%.*]] = call float bitcast (i32 ()* @use_dispatch_ptr_ret_type to float ()*)()
+; AKF_HSA-NEXT:    [[F:%.*]] = call float @use_dispatch_ptr_ret_type()
 ; AKF_HSA-NEXT:    [[FADD:%.*]] = fadd float [[F]], 1.000000e+00
 ; AKF_HSA-NEXT:    ret float [[FADD]]
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@func_indirect_use_dispatch_ptr_constexpr_cast_func
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR7]] {
-; ATTRIBUTOR_HSA-NEXT:    [[F:%.*]] = call float bitcast (i32 ()* @use_dispatch_ptr_ret_type to float ()*)()
+; ATTRIBUTOR_HSA-NEXT:    [[F:%.*]] = call float @use_dispatch_ptr_ret_type()
 ; ATTRIBUTOR_HSA-NEXT:    [[FADD:%.*]] = fadd float [[F]], 1.000000e+00
 ; ATTRIBUTOR_HSA-NEXT:    ret float [[FADD]]
 ;
-  %f = call float bitcast (i32()* @use_dispatch_ptr_ret_type to float()*)()
+  %f = call float @use_dispatch_ptr_ret_type()
   %fadd = fadd float %f, 1.0
   ret float %fadd
 }
 
-define float @func_indirect_call(float()* %fptr) #3 {
+define float @func_indirect_call(ptr %fptr) #3 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@func_indirect_call
-; AKF_HSA-SAME: (float ()* [[FPTR:%.*]]) #[[ATTR3]] {
+; AKF_HSA-SAME: (ptr [[FPTR:%.*]]) #[[ATTR3]] {
 ; AKF_HSA-NEXT:    [[F:%.*]] = call float [[FPTR]]()
 ; AKF_HSA-NEXT:    [[FADD:%.*]] = fadd float [[F]], 1.000000e+00
 ; AKF_HSA-NEXT:    ret float [[FADD]]
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@func_indirect_call
-; ATTRIBUTOR_HSA-SAME: (float ()* [[FPTR:%.*]]) #[[ATTR15]] {
+; ATTRIBUTOR_HSA-SAME: (ptr [[FPTR:%.*]]) #[[ATTR15]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[F:%.*]] = call float [[FPTR]]()
 ; ATTRIBUTOR_HSA-NEXT:    [[FADD:%.*]] = fadd float [[F]], 1.000000e+00
 ; ATTRIBUTOR_HSA-NEXT:    ret float [[FADD]]
@@ -798,15 +798,15 @@ define float @func_extern_call() #3 {
   ret float %fadd
 }
 
-define float @func_null_call(float()* %fptr) #3 {
+define float @func_null_call(ptr %fptr) #3 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@func_null_call
-; AKF_HSA-SAME: (float ()* [[FPTR:%.*]]) #[[ATTR3]] {
+; AKF_HSA-SAME: (ptr [[FPTR:%.*]]) #[[ATTR3]] {
 ; AKF_HSA-NEXT:    [[F:%.*]] = call float null()
 ; AKF_HSA-NEXT:    [[FADD:%.*]] = fadd float [[F]], 1.000000e+00
 ; AKF_HSA-NEXT:    ret float [[FADD]]
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@func_null_call
-; ATTRIBUTOR_HSA-SAME: (float ()* [[FPTR:%.*]]) #[[ATTR15]] {
+; ATTRIBUTOR_HSA-SAME: (ptr [[FPTR:%.*]]) #[[ATTR15]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[F:%.*]] = call float null()
 ; ATTRIBUTOR_HSA-NEXT:    [[FADD:%.*]] = fadd float [[F]], 1.000000e+00
 ; ATTRIBUTOR_HSA-NEXT:    ret float [[FADD]]
@@ -841,15 +841,15 @@ define float @func_other_intrinsic_call(float %arg) #3 {
 define amdgpu_kernel void @kern_sanitize_address() #4 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@kern_sanitize_address
 ; AKF_HSA-SAME: () #[[ATTR5:[0-9]+]] {
-; AKF_HSA-NEXT:    store volatile i32 0, i32 addrspace(1)* null, align 4
+; AKF_HSA-NEXT:    store volatile i32 0, ptr addrspace(1) null, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@kern_sanitize_address
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR17:[0-9]+]] {
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, i32 addrspace(1)* null, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, ptr addrspace(1) null, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  store volatile i32 0, i32 addrspace(1)* null
+  store volatile i32 0, ptr addrspace(1) null
   ret void
 }
 
@@ -857,15 +857,15 @@ define amdgpu_kernel void @kern_sanitize_address() #4 {
 define void @func_sanitize_address() #4 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@func_sanitize_address
 ; AKF_HSA-SAME: () #[[ATTR5]] {
-; AKF_HSA-NEXT:    store volatile i32 0, i32 addrspace(1)* null, align 4
+; AKF_HSA-NEXT:    store volatile i32 0, ptr addrspace(1) null, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@func_sanitize_address
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR17]] {
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, i32 addrspace(1)* null, align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, ptr addrspace(1) null, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  store volatile i32 0, i32 addrspace(1)* null
+  store volatile i32 0, ptr addrspace(1) null
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
index b3e7a55f6b59..8e3965b0f112 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -12,336 +12,336 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0
 declare i32 @llvm.amdgcn.workitem.id.y() #0
 declare i32 @llvm.amdgcn.workitem.id.z() #0
 
-declare i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #0
-declare i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #0
-declare i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #0
+declare ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() #0
+declare ptr addrspace(4) @llvm.amdgcn.queue.ptr() #0
+declare ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr() #0
 
-declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #2
-declare i1 @llvm.amdgcn.is.private(i8* nocapture) #2
+declare i1 @llvm.amdgcn.is.shared(ptr nocapture) #2
+declare i1 @llvm.amdgcn.is.private(ptr nocapture) #2
 
-define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x(ptr addrspace(1) %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_tgid_x
-; HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
+; HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
 ; HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
-; HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workgroup.id.x()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tgid_y
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; AKF_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tgid_y
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workgroup.id.y()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @multi_use_tgid_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@multi_use_tgid_y
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@multi_use_tgid_y
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR2]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
   %val1 = call i32 @llvm.amdgcn.workgroup.id.y()
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tgid_x_y
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
 ; AKF_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tgid_x_y
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR2]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workgroup.id.x()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.y()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tgid_z
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; AKF_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tgid_z
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workgroup.id.z()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tgid_x_z
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
 ; AKF_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tgid_x_z
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR3]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workgroup.id.x()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_y_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tgid_y_z
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
 ; AKF_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tgid_y_z
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x_y_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tgid_x_y_z
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
 ; AKF_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
 ; AKF_HSA-NEXT:    [[VAL2:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tgid_x_y_z
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR4]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL2:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workgroup.id.x()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.y()
   %val2 = call i32 @llvm.amdgcn.workgroup.id.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
-  store volatile i32 %val2, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
+  store volatile i32 %val2, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x(ptr addrspace(1) %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_tidig_x
-; HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workitem.id.x()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tidig_y
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; AKF_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tidig_y
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR5:[0-9]+]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR5:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workitem.id.y()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tidig_z
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
-; AKF_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tidig_z
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR6:[0-9]+]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR6:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val = call i32 @llvm.amdgcn.workitem.id.z()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x_tgid_x(ptr addrspace(1) %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_tidig_x_tgid_x
-; HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
-; HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workitem.id.x()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.x()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_y_tgid_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tidig_y_tgid_y
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; AKF_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tidig_y_tgid_y
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workitem.id.y()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.y()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x_y_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_tidig_x_y_z
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; AKF_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; AKF_HSA-NEXT:    [[VAL2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_tidig_x_y_z
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR8:[0-9]+]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR8:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workitem.id.x()
   %val1 = call i32 @llvm.amdgcn.workitem.id.y()
   %val2 = call i32 @llvm.amdgcn.workitem.id.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
-  store volatile i32 %val2, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
+  store volatile i32 %val2, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_all_workitems(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_all_workitems
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; AKF_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; AKF_HSA-NEXT:    [[VAL2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
 ; AKF_HSA-NEXT:    [[VAL3:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
 ; AKF_HSA-NEXT:    [[VAL4:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
 ; AKF_HSA-NEXT:    [[VAL5:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL3]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL4]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_HSA-NEXT:    store volatile i32 [[VAL5]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL3]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL4]], ptr addrspace(1) [[PTR]], align 4
+; AKF_HSA-NEXT:    store volatile i32 [[VAL5]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_all_workitems
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR9:[0-9]+]] {
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR9:[0-9]+]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL3:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL4:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
 ; ATTRIBUTOR_HSA-NEXT:    [[VAL5:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL3]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL4]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL5]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL3]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL4]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 [[VAL5]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.amdgcn.workitem.id.x()
@@ -350,227 +350,219 @@ define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
   %val3 = call i32 @llvm.amdgcn.workgroup.id.x()
   %val4 = call i32 @llvm.amdgcn.workgroup.id.y()
   %val5 = call i32 @llvm.amdgcn.workgroup.id.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
-  store volatile i32 %val2, i32 addrspace(1)* %ptr
-  store volatile i32 %val3, i32 addrspace(1)* %ptr
-  store volatile i32 %val4, i32 addrspace(1)* %ptr
-  store volatile i32 %val5, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
+  store volatile i32 %val2, ptr addrspace(1) %ptr
+  store volatile i32 %val3, ptr addrspace(1) %ptr
+  store volatile i32 %val4, ptr addrspace(1) %ptr
+  store volatile i32 %val5, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_dispatch_ptr(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_dispatch_ptr(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_dispatch_ptr
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-; AKF_HSA-NEXT:    [[BC:%.*]] = bitcast i8 addrspace(4)* [[DISPATCH_PTR]] to i32 addrspace(4)*
-; AKF_HSA-NEXT:    [[VAL:%.*]] = load i32, i32 addrspace(4)* [[BC]], align 4
-; AKF_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; AKF_HSA-NEXT:    [[VAL:%.*]] = load i32, ptr addrspace(4) [[DISPATCH_PTR]], align 4
+; AKF_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_dispatch_ptr
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR10:[0-9]+]] {
-; ATTRIBUTOR_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-; ATTRIBUTOR_HSA-NEXT:    [[BC:%.*]] = bitcast i8 addrspace(4)* [[DISPATCH_PTR]] to i32 addrspace(4)*
-; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = load i32, i32 addrspace(4)* [[BC]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR10:[0-9]+]] {
+; ATTRIBUTOR_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = load i32, ptr addrspace(4) [[DISPATCH_PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-  %bc = bitcast i8 addrspace(4)* %dispatch.ptr to i32 addrspace(4)*
-  %val = load i32, i32 addrspace(4)* %bc
-  store i32 %val, i32 addrspace(1)* %ptr
+  %dispatch.ptr = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+  %val = load i32, ptr addrspace(4) %dispatch.ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_queue_ptr(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_queue_ptr(ptr addrspace(1) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_queue_ptr
-; AKF_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-; AKF_HSA-NEXT:    [[BC:%.*]] = bitcast i8 addrspace(4)* [[DISPATCH_PTR]] to i32 addrspace(4)*
-; AKF_HSA-NEXT:    [[VAL:%.*]] = load i32, i32 addrspace(4)* [[BC]], align 4
-; AKF_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+; AKF_HSA-NEXT:    [[VAL:%.*]] = load i32, ptr addrspace(4) [[DISPATCH_PTR]], align 4
+; AKF_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_queue_ptr
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR11:[0-9]+]] {
-; ATTRIBUTOR_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-; ATTRIBUTOR_HSA-NEXT:    [[BC:%.*]] = bitcast i8 addrspace(4)* [[DISPATCH_PTR]] to i32 addrspace(4)*
-; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = load i32, i32 addrspace(4)* [[BC]], align 4
-; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR11:[0-9]+]] {
+; ATTRIBUTOR_HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+; ATTRIBUTOR_HSA-NEXT:    [[VAL:%.*]] = load i32, ptr addrspace(4) [[DISPATCH_PTR]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-  %bc = bitcast i8 addrspace(4)* %dispatch.ptr to i32 addrspace(4)*
-  %val = load i32, i32 addrspace(4)* %bc
-  store i32 %val, i32 addrspace(1)* %ptr
+  %dispatch.ptr = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+  %val = load i32, ptr addrspace(4) %dispatch.ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_kernarg_segment_ptr(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_kernarg_segment_ptr(ptr addrspace(1) %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_kernarg_segment_ptr
-; HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
-; HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
-; HSA-NEXT:    [[BC:%.*]] = bitcast i8 addrspace(4)* [[DISPATCH_PTR]] to i32 addrspace(4)*
-; HSA-NEXT:    [[VAL:%.*]] = load i32, i32 addrspace(4)* [[BC]], align 4
-; HSA-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; HSA-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+; HSA-NEXT:    [[VAL:%.*]] = load i32, ptr addrspace(4) [[DISPATCH_PTR]], align 4
+; HSA-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; HSA-NEXT:    ret void
 ;
-  %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
-  %bc = bitcast i8 addrspace(4)* %dispatch.ptr to i32 addrspace(4)*
-  %val = load i32, i32 addrspace(4)* %bc
-  store i32 %val, i32 addrspace(1)* %ptr
+  %dispatch.ptr = call ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+  %val = load i32, ptr addrspace(4) %dispatch.ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #1 {
+define amdgpu_kernel void @use_group_to_flat_addrspacecast(ptr addrspace(3) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_group_to_flat_addrspacecast
-; AKF_HSA-SAME: (i32 addrspace(3)* [[PTR:%.*]]) #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(3)* [[PTR]] to i32*
-; AKF_HSA-NEXT:    store volatile i32 0, i32* [[STOF]], align 4
+; AKF_HSA-SAME: (ptr addrspace(3) [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr
+; AKF_HSA-NEXT:    store volatile i32 0, ptr [[STOF]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_group_to_flat_addrspacecast
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(3)* [[PTR:%.*]]) #[[ATTR11]] {
-; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(3)* [[PTR]] to i32*
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, i32* [[STOF]], align 4
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(3) [[PTR:%.*]]) #[[ATTR11]] {
+; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(3) [[PTR]] to ptr
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, ptr [[STOF]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %stof = addrspacecast i32 addrspace(3)* %ptr to i32*
-  store volatile i32 0, i32* %stof
+  %stof = addrspacecast ptr addrspace(3) %ptr to ptr
+  store volatile i32 0, ptr %stof
   ret void
 }
 
-define amdgpu_kernel void @use_private_to_flat_addrspacecast(i32 addrspace(5)* %ptr) #1 {
+define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_private_to_flat_addrspacecast
-; AKF_HSA-SAME: (i32 addrspace(5)* [[PTR:%.*]]) #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(5)* [[PTR]] to i32*
-; AKF_HSA-NEXT:    store volatile i32 0, i32* [[STOF]], align 4
+; AKF_HSA-SAME: (ptr addrspace(5) [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(5) [[PTR]] to ptr
+; AKF_HSA-NEXT:    store volatile i32 0, ptr [[STOF]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_private_to_flat_addrspacecast
-; ATTRIBUTOR_HSA-SAME: (i32 addrspace(5)* [[PTR:%.*]]) #[[ATTR11]] {
-; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(5)* [[PTR]] to i32*
-; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, i32* [[STOF]], align 4
+; ATTRIBUTOR_HSA-SAME: (ptr addrspace(5) [[PTR:%.*]]) #[[ATTR11]] {
+; ATTRIBUTOR_HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(5) [[PTR]] to ptr
+; ATTRIBUTOR_HSA-NEXT:    store volatile i32 0, ptr [[STOF]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %stof = addrspacecast i32 addrspace(5)* %ptr to i32*
-  store volatile i32 0, i32* %stof
+  %stof = addrspacecast ptr addrspace(5) %ptr to ptr
+  store volatile i32 0, ptr %stof
   ret void
 }
 
-define amdgpu_kernel void @use_flat_to_group_addrspacecast(i32* %ptr) #1 {
+define amdgpu_kernel void @use_flat_to_group_addrspacecast(ptr %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_flat_to_group_addrspacecast
-; HSA-SAME: (i32* [[PTR:%.*]]) #[[ATTR1]] {
-; HSA-NEXT:    [[FTOS:%.*]] = addrspacecast i32* [[PTR]] to i32 addrspace(3)*
-; HSA-NEXT:    store volatile i32 0, i32 addrspace(3)* [[FTOS]], align 4
+; HSA-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; HSA-NEXT:    [[FTOS:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(3)
+; HSA-NEXT:    store volatile i32 0, ptr addrspace(3) [[FTOS]], align 4
 ; HSA-NEXT:    ret void
 ;
-  %ftos = addrspacecast i32* %ptr to i32 addrspace(3)*
-  store volatile i32 0, i32 addrspace(3)* %ftos
+  %ftos = addrspacecast ptr %ptr to ptr addrspace(3)
+  store volatile i32 0, ptr addrspace(3) %ftos
   ret void
 }
 
-define amdgpu_kernel void @use_flat_to_private_addrspacecast(i32* %ptr) #1 {
+define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_flat_to_private_addrspacecast
-; HSA-SAME: (i32* [[PTR:%.*]]) #[[ATTR1]] {
-; HSA-NEXT:    [[FTOS:%.*]] = addrspacecast i32* [[PTR]] to i32 addrspace(5)*
-; HSA-NEXT:    store volatile i32 0, i32 addrspace(5)* [[FTOS]], align 4
+; HSA-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; HSA-NEXT:    [[FTOS:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
+; HSA-NEXT:    store volatile i32 0, ptr addrspace(5) [[FTOS]], align 4
 ; HSA-NEXT:    ret void
 ;
-  %ftos = addrspacecast i32* %ptr to i32 addrspace(5)*
-  store volatile i32 0, i32 addrspace(5)* %ftos
+  %ftos = addrspacecast ptr %ptr to ptr addrspace(5)
+  store volatile i32 0, ptr addrspace(5) %ftos
   ret void
 }
 
 ; No-op addrspacecast should not use queue ptr
-define amdgpu_kernel void @use_global_to_flat_addrspacecast(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_global_to_flat_addrspacecast(ptr addrspace(1) %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_global_to_flat_addrspacecast
-; HSA-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
-; HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(1)* [[PTR]] to i32*
-; HSA-NEXT:    store volatile i32 0, i32* [[STOF]], align 4
+; HSA-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
+; HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(1) [[PTR]] to ptr
+; HSA-NEXT:    store volatile i32 0, ptr [[STOF]], align 4
 ; HSA-NEXT:    ret void
 ;
-  %stof = addrspacecast i32 addrspace(1)* %ptr to i32*
-  store volatile i32 0, i32* %stof
+  %stof = addrspacecast ptr addrspace(1) %ptr to ptr
+  store volatile i32 0, ptr %stof
   ret void
 }
 
-define amdgpu_kernel void @use_constant_to_flat_addrspacecast(i32 addrspace(4)* %ptr) #1 {
+define amdgpu_kernel void @use_constant_to_flat_addrspacecast(ptr addrspace(4) %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_constant_to_flat_addrspacecast
-; HSA-SAME: (i32 addrspace(4)* [[PTR:%.*]]) #[[ATTR1]] {
-; HSA-NEXT:    [[STOF:%.*]] = addrspacecast i32 addrspace(4)* [[PTR]] to i32*
-; HSA-NEXT:    [[LD:%.*]] = load volatile i32, i32* [[STOF]], align 4
+; HSA-SAME: (ptr addrspace(4) [[PTR:%.*]]) #[[ATTR1]] {
+; HSA-NEXT:    [[STOF:%.*]] = addrspacecast ptr addrspace(4) [[PTR]] to ptr
+; HSA-NEXT:    [[LD:%.*]] = load volatile i32, ptr [[STOF]], align 4
 ; HSA-NEXT:    ret void
 ;
-  %stof = addrspacecast i32 addrspace(4)* %ptr to i32*
-  %ld = load volatile i32, i32* %stof
+  %stof = addrspacecast ptr addrspace(4) %ptr to ptr
+  %ld = load volatile i32, ptr %stof
   ret void
 }
 
-define amdgpu_kernel void @use_flat_to_global_addrspacecast(i32* %ptr) #1 {
+define amdgpu_kernel void @use_flat_to_global_addrspacecast(ptr %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_flat_to_global_addrspacecast
-; HSA-SAME: (i32* [[PTR:%.*]]) #[[ATTR1]] {
-; HSA-NEXT:    [[FTOS:%.*]] = addrspacecast i32* [[PTR]] to i32 addrspace(1)*
-; HSA-NEXT:    store volatile i32 0, i32 addrspace(1)* [[FTOS]], align 4
+; HSA-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; HSA-NEXT:    [[FTOS:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(1)
+; HSA-NEXT:    store volatile i32 0, ptr addrspace(1) [[FTOS]], align 4
 ; HSA-NEXT:    ret void
 ;
-  %ftos = addrspacecast i32* %ptr to i32 addrspace(1)*
-  store volatile i32 0, i32 addrspace(1)* %ftos
+  %ftos = addrspacecast ptr %ptr to ptr addrspace(1)
+  store volatile i32 0, ptr addrspace(1) %ftos
   ret void
 }
 
-define amdgpu_kernel void @use_flat_to_constant_addrspacecast(i32* %ptr) #1 {
+define amdgpu_kernel void @use_flat_to_constant_addrspacecast(ptr %ptr) #1 {
 ; HSA-LABEL: define {{[^@]+}}@use_flat_to_constant_addrspacecast
-; HSA-SAME: (i32* [[PTR:%.*]]) #[[ATTR1]] {
-; HSA-NEXT:    [[FTOS:%.*]] = addrspacecast i32* [[PTR]] to i32 addrspace(4)*
-; HSA-NEXT:    [[LD:%.*]] = load volatile i32, i32 addrspace(4)* [[FTOS]], align 4
+; HSA-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; HSA-NEXT:    [[FTOS:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(4)
+; HSA-NEXT:    [[LD:%.*]] = load volatile i32, ptr addrspace(4) [[FTOS]], align 4
 ; HSA-NEXT:    ret void
 ;
-  %ftos = addrspacecast i32* %ptr to i32 addrspace(4)*
-  %ld = load volatile i32, i32 addrspace(4)* %ftos
+  %ftos = addrspacecast ptr %ptr to ptr addrspace(4)
+  %ld = load volatile i32, ptr addrspace(4) %ftos
   ret void
 }
 
-define amdgpu_kernel void @use_is_shared(i8* %ptr) #1 {
+define amdgpu_kernel void @use_is_shared(ptr %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_is_shared
-; AKF_HSA-SAME: (i8* [[PTR:%.*]]) #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(i8* [[PTR]])
+; AKF_HSA-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[PTR]])
 ; AKF_HSA-NEXT:    [[EXT:%.*]] = zext i1 [[IS_SHARED]] to i32
-; AKF_HSA-NEXT:    store i32 [[EXT]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store i32 [[EXT]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_is_shared
-; ATTRIBUTOR_HSA-SAME: (i8* [[PTR:%.*]]) #[[ATTR11]] {
-; ATTRIBUTOR_HSA-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(i8* [[PTR]])
+; ATTRIBUTOR_HSA-SAME: (ptr [[PTR:%.*]]) #[[ATTR11]] {
+; ATTRIBUTOR_HSA-NEXT:    [[IS_SHARED:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[PTR]])
 ; ATTRIBUTOR_HSA-NEXT:    [[EXT:%.*]] = zext i1 [[IS_SHARED]] to i32
-; ATTRIBUTOR_HSA-NEXT:    store i32 [[EXT]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 [[EXT]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %ptr)
   %ext = zext i1 %is.shared to i32
-  store i32 %ext, i32 addrspace(1)* undef
+  store i32 %ext, ptr addrspace(1) undef
   ret void
 }
 
-define amdgpu_kernel void @use_is_private(i8* %ptr) #1 {
+define amdgpu_kernel void @use_is_private(ptr %ptr) #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_is_private
-; AKF_HSA-SAME: (i8* [[PTR:%.*]]) #[[ATTR1]] {
-; AKF_HSA-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(i8* [[PTR]])
+; AKF_HSA-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_HSA-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR]])
 ; AKF_HSA-NEXT:    [[EXT:%.*]] = zext i1 [[IS_PRIVATE]] to i32
-; AKF_HSA-NEXT:    store i32 [[EXT]], i32 addrspace(1)* undef, align 4
+; AKF_HSA-NEXT:    store i32 [[EXT]], ptr addrspace(1) undef, align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_is_private
-; ATTRIBUTOR_HSA-SAME: (i8* [[PTR:%.*]]) #[[ATTR11]] {
-; ATTRIBUTOR_HSA-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(i8* [[PTR]])
+; ATTRIBUTOR_HSA-SAME: (ptr [[PTR:%.*]]) #[[ATTR11]] {
+; ATTRIBUTOR_HSA-NEXT:    [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR]])
 ; ATTRIBUTOR_HSA-NEXT:    [[EXT:%.*]] = zext i1 [[IS_PRIVATE]] to i32
-; ATTRIBUTOR_HSA-NEXT:    store i32 [[EXT]], i32 addrspace(1)* undef, align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 [[EXT]], ptr addrspace(1) undef, align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
-  %is.private = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %ptr)
   %ext = zext i1 %is.private to i32
-  store i32 %ext, i32 addrspace(1)* undef
+  store i32 %ext, ptr addrspace(1) undef
   ret void
 }
 
@@ -578,17 +570,17 @@ define amdgpu_kernel void @use_alloca() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_alloca
 ; AKF_HSA-SAME: () #[[ATTR2:[0-9]+]] {
 ; AKF_HSA-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
-; AKF_HSA-NEXT:    store i32 0, i32 addrspace(5)* [[ALLOCA]], align 4
+; AKF_HSA-NEXT:    store i32 0, ptr addrspace(5) [[ALLOCA]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_alloca
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR1]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
-; ATTRIBUTOR_HSA-NEXT:    store i32 0, i32 addrspace(5)* [[ALLOCA]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 0, ptr addrspace(5) [[ALLOCA]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %alloca = alloca i32, addrspace(5)
-  store i32 0, i32 addrspace(5)* %alloca
+  store i32 0, ptr addrspace(5) %alloca
   ret void
 }
 
@@ -599,7 +591,7 @@ define amdgpu_kernel void @use_alloca_non_entry_block() #1 {
 ; AKF_HSA-NEXT:    br label [[BB:%.*]]
 ; AKF_HSA:       bb:
 ; AKF_HSA-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
-; AKF_HSA-NEXT:    store i32 0, i32 addrspace(5)* [[ALLOCA]], align 4
+; AKF_HSA-NEXT:    store i32 0, ptr addrspace(5) [[ALLOCA]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_alloca_non_entry_block
@@ -608,7 +600,7 @@ define amdgpu_kernel void @use_alloca_non_entry_block() #1 {
 ; ATTRIBUTOR_HSA-NEXT:    br label [[BB:%.*]]
 ; ATTRIBUTOR_HSA:       bb:
 ; ATTRIBUTOR_HSA-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
-; ATTRIBUTOR_HSA-NEXT:    store i32 0, i32 addrspace(5)* [[ALLOCA]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 0, ptr addrspace(5) [[ALLOCA]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
 entry:
@@ -616,7 +608,7 @@ entry:
 
 bb:
   %alloca = alloca i32, addrspace(5)
-  store i32 0, i32 addrspace(5)* %alloca
+  store i32 0, ptr addrspace(5) %alloca
   ret void
 }
 
@@ -624,17 +616,17 @@ define void @use_alloca_func() #1 {
 ; AKF_HSA-LABEL: define {{[^@]+}}@use_alloca_func
 ; AKF_HSA-SAME: () #[[ATTR2]] {
 ; AKF_HSA-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
-; AKF_HSA-NEXT:    store i32 0, i32 addrspace(5)* [[ALLOCA]], align 4
+; AKF_HSA-NEXT:    store i32 0, ptr addrspace(5) [[ALLOCA]], align 4
 ; AKF_HSA-NEXT:    ret void
 ;
 ; ATTRIBUTOR_HSA-LABEL: define {{[^@]+}}@use_alloca_func
 ; ATTRIBUTOR_HSA-SAME: () #[[ATTR1]] {
 ; ATTRIBUTOR_HSA-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
-; ATTRIBUTOR_HSA-NEXT:    store i32 0, i32 addrspace(5)* [[ALLOCA]], align 4
+; ATTRIBUTOR_HSA-NEXT:    store i32 0, ptr addrspace(5) [[ALLOCA]], align 4
 ; ATTRIBUTOR_HSA-NEXT:    ret void
 ;
   %alloca = alloca i32, addrspace(5)
-  store i32 0, i32 addrspace(5)* %alloca
+  store i32 0, ptr addrspace(5) %alloca
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
index bda5a5b48b69..2f899440b74d 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
@@ -14,329 +14,329 @@ declare i32 @llvm.r600.read.local.size.x() #0
 declare i32 @llvm.r600.read.local.size.y() #0
 declare i32 @llvm.r600.read.local.size.z() #0
 
-define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x(ptr addrspace(1) %ptr) #1 {
 ; CHECK-LABEL: define {{[^@]+}}@use_tgid_x
-; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
 ; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.x()
-; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %val = call i32 @llvm.r600.read.tgid.x()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_y
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; AKF_CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_y
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; ATTRIBUTOR_CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val = call i32 @llvm.r600.read.tgid.y()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @multi_use_tgid_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@multi_use_tgid_y
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@multi_use_tgid_y
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR2]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.r600.read.tgid.y()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
   %val1 = call i32 @llvm.r600.read.tgid.y()
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
 ; AKF_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR2]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.r600.read.tgid.x()
   %val1 = call i32 @llvm.r600.read.tgid.y()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_z
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; AKF_CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_z
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; ATTRIBUTOR_CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val = call i32 @llvm.r600.read.tgid.z()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_z
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
 ; AKF_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_z
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR3]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.r600.read.tgid.x()
   %val1 = call i32 @llvm.r600.read.tgid.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_y_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_y_z
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
 ; AKF_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_y_z
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.r600.read.tgid.y()
   %val1 = call i32 @llvm.r600.read.tgid.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tgid_x_y_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y_z
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
 ; AKF_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
 ; AKF_CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y_z
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR4]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.r600.read.tgid.x()
   %val1 = call i32 @llvm.r600.read.tgid.y()
   %val2 = call i32 @llvm.r600.read.tgid.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
-  store volatile i32 %val2, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
+  store volatile i32 %val2, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x(ptr addrspace(1) %ptr) #1 {
 ; CHECK-LABEL: define {{[^@]+}}@use_tidig_x
-; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.x()
-; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %val = call i32 @llvm.r600.read.tidig.x()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tidig_y
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.y()
-; AKF_CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tidig_y
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR5:[0-9]+]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR5:[0-9]+]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.y()
-; ATTRIBUTOR_CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val = call i32 @llvm.r600.read.tidig.y()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tidig_z
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.z()
-; AKF_CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tidig_z
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR6:[0-9]+]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR6:[0-9]+]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.z()
-; ATTRIBUTOR_CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val = call i32 @llvm.r600.read.tidig.z()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x_tgid_x(ptr addrspace(1) %ptr) #1 {
 ; CHECK-LABEL: define {{[^@]+}}@use_tidig_x_tgid_x
-; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
 ; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.x()
-; CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.r600.read.tidig.x()
   %val1 = call i32 @llvm.r600.read.tgid.x()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_y_tgid_y(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tidig_y_tgid_y
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.y()
 ; AKF_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tidig_y_tgid_y
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.y()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.r600.read.tidig.y()
   %val1 = call i32 @llvm.r600.read.tgid.y()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_tidig_x_y_z(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_tidig_x_y_z
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
 ; AKF_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
 ; AKF_CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tidig_x_y_z
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR8:[0-9]+]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR8:[0-9]+]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.r600.read.tidig.x()
   %val1 = call i32 @llvm.r600.read.tidig.y()
   %val2 = call i32 @llvm.r600.read.tidig.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
-  store volatile i32 %val2, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
+  store volatile i32 %val2, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_all_workitems(ptr addrspace(1) %ptr) #1 {
 ; AKF_CHECK-LABEL: define {{[^@]+}}@use_all_workitems
-; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; AKF_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; AKF_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
 ; AKF_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
 ; AKF_CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
 ; AKF_CHECK-NEXT:    [[VAL3:%.*]] = call i32 @llvm.r600.read.tgid.x()
 ; AKF_CHECK-NEXT:    [[VAL4:%.*]] = call i32 @llvm.r600.read.tgid.y()
 ; AKF_CHECK-NEXT:    [[VAL5:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL3]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL4]], i32 addrspace(1)* [[PTR]], align 4
-; AKF_CHECK-NEXT:    store volatile i32 [[VAL5]], i32 addrspace(1)* [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL3]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL4]], ptr addrspace(1) [[PTR]], align 4
+; AKF_CHECK-NEXT:    store volatile i32 [[VAL5]], ptr addrspace(1) [[PTR]], align 4
 ; AKF_CHECK-NEXT:    ret void
 ;
 ; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_all_workitems
-; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR9:[0-9]+]] {
+; ATTRIBUTOR_CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR9:[0-9]+]] {
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL3:%.*]] = call i32 @llvm.r600.read.tgid.x()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL4:%.*]] = call i32 @llvm.r600.read.tgid.y()
 ; ATTRIBUTOR_CHECK-NEXT:    [[VAL5:%.*]] = call i32 @llvm.r600.read.tgid.z()
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL3]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL4]], i32 addrspace(1)* [[PTR]], align 4
-; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL5]], i32 addrspace(1)* [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL3]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL4]], ptr addrspace(1) [[PTR]], align 4
+; ATTRIBUTOR_CHECK-NEXT:    store volatile i32 [[VAL5]], ptr addrspace(1) [[PTR]], align 4
 ; ATTRIBUTOR_CHECK-NEXT:    ret void
 ;
   %val0 = call i32 @llvm.r600.read.tidig.x()
@@ -345,48 +345,48 @@ define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
   %val3 = call i32 @llvm.r600.read.tgid.x()
   %val4 = call i32 @llvm.r600.read.tgid.y()
   %val5 = call i32 @llvm.r600.read.tgid.z()
-  store volatile i32 %val0, i32 addrspace(1)* %ptr
-  store volatile i32 %val1, i32 addrspace(1)* %ptr
-  store volatile i32 %val2, i32 addrspace(1)* %ptr
-  store volatile i32 %val3, i32 addrspace(1)* %ptr
-  store volatile i32 %val4, i32 addrspace(1)* %ptr
-  store volatile i32 %val5, i32 addrspace(1)* %ptr
+  store volatile i32 %val0, ptr addrspace(1) %ptr
+  store volatile i32 %val1, ptr addrspace(1) %ptr
+  store volatile i32 %val2, ptr addrspace(1) %ptr
+  store volatile i32 %val3, ptr addrspace(1) %ptr
+  store volatile i32 %val4, ptr addrspace(1) %ptr
+  store volatile i32 %val5, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_get_local_size_x(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_get_local_size_x(ptr addrspace(1) %ptr) #1 {
 ; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_x
-; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.x()
-; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %val = call i32 @llvm.r600.read.local.size.x()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_get_local_size_y(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_get_local_size_y(ptr addrspace(1) %ptr) #1 {
 ; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_y
-; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.y()
-; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %val = call i32 @llvm.r600.read.local.size.y()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 
-define amdgpu_kernel void @use_get_local_size_z(i32 addrspace(1)* %ptr) #1 {
+define amdgpu_kernel void @use_get_local_size_z(ptr addrspace(1) %ptr) #1 {
 ; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_z
-; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: (ptr addrspace(1) [[PTR:%.*]]) #[[ATTR1]] {
 ; CHECK-NEXT:    [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.z()
-; CHECK-NEXT:    store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
+; CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[PTR]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %val = call i32 @llvm.r600.read.local.size.z()
-  store i32 %val, i32 addrspace(1)* %ptr
+  store i32 %val, ptr addrspace(1) %ptr
   ret void
 }
 

diff  --git a/llvm/test/CodeGen/AMDGPU/annotate-noclobber.ll b/llvm/test/CodeGen/AMDGPU/annotate-noclobber.ll
index 48e266ae3230..4b20a3ef4027 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-noclobber.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-noclobber.ll
@@ -3,45 +3,33 @@ target datalayout = "A5"
 
 
 ; OPT-LABEL: @amdgpu_noclobber_global(
-; OPT:      %addr = getelementptr i32, i32 addrspace(1)* %in, i64 0, !amdgpu.uniform !0
-; OPT-NEXT: %load = load i32, i32 addrspace(1)* %addr, align 4, !amdgpu.noclobber !0
-define amdgpu_kernel void @amdgpu_noclobber_global( i32 addrspace(1)* %in,  i32 addrspace(1)* %out) {
-entry:
-  %addr = getelementptr i32, i32 addrspace(1)* %in, i64 0
-  %load = load i32, i32 addrspace(1)* %addr, align 4
-  store i32 %load, i32 addrspace(1)* %out, align 4
+; OPT-NEXT: %load = load i32, ptr addrspace(1) %in, align 4, !amdgpu.noclobber !0
+define amdgpu_kernel void @amdgpu_noclobber_global( ptr addrspace(1) %in,  ptr addrspace(1) %out) {
+  %load = load i32, ptr addrspace(1) %in, align 4
+  store i32 %load, ptr addrspace(1) %out, align 4
   ret void
 }
 
 ; OPT-LABEL: @amdgpu_noclobber_local(
-; OPT:      %addr = getelementptr i32, i32 addrspace(3)* %in, i64 0, !amdgpu.uniform !0
-; OPT-NEXT: %load = load i32, i32 addrspace(3)* %addr, align 4
-define amdgpu_kernel void @amdgpu_noclobber_local( i32 addrspace(3)* %in,  i32 addrspace(1)* %out) {
-entry:
-  %addr = getelementptr i32, i32 addrspace(3)* %in, i64 0
-  %load = load i32, i32 addrspace(3)* %addr, align 4
-  store i32 %load, i32 addrspace(1)* %out, align 4
+; OPT-NEXT: %load = load i32, ptr addrspace(3) %in, align 4
+define amdgpu_kernel void @amdgpu_noclobber_local( ptr addrspace(3) %in,  ptr addrspace(1) %out) {
+  %load = load i32, ptr addrspace(3) %in, align 4
+  store i32 %load, ptr addrspace(1) %out, align 4
   ret void
 }
 
 ; OPT-LABEL: @amdgpu_noclobber_private(
-; OPT:      %addr = getelementptr i32, i32 addrspace(5)* %in, i64 0, !amdgpu.uniform !0
-; OPT-NEXT: %load = load i32, i32 addrspace(5)* %addr, align 4
-define amdgpu_kernel void @amdgpu_noclobber_private( i32 addrspace(5)* %in,  i32 addrspace(1)* %out) {
-entry:
-  %addr = getelementptr i32, i32 addrspace(5)* %in, i64 0
-  %load = load i32, i32 addrspace(5)* %addr, align 4
-  store i32 %load, i32 addrspace(1)* %out, align 4
+; OPT-NEXT: %load = load i32, ptr addrspace(5) %in, align 4
+define amdgpu_kernel void @amdgpu_noclobber_private( ptr addrspace(5) %in,  ptr addrspace(1) %out) {
+  %load = load i32, ptr addrspace(5) %in, align 4
+  store i32 %load, ptr addrspace(1) %out, align 4
   ret void
 }
 
 ; OPT-LABEL: @amdgpu_noclobber_flat(
-; OPT:      %addr = getelementptr i32, i32 addrspace(4)* %in, i64 0, !amdgpu.uniform !0
-; OPT-NEXT: %load = load i32, i32 addrspace(4)* %addr, align 4
-define amdgpu_kernel void @amdgpu_noclobber_flat( i32 addrspace(4)* %in,  i32 addrspace(1)* %out) {
-entry:
-  %addr = getelementptr i32, i32 addrspace(4)* %in, i64 0
-  %load = load i32, i32 addrspace(4)* %addr, align 4
-  store i32 %load, i32 addrspace(1)* %out, align 4
+; OPT-NEXT: %load = load i32, ptr addrspace(4) %in, align 4
+define amdgpu_kernel void @amdgpu_noclobber_flat( ptr addrspace(4) %in,  ptr addrspace(1) %out) {
+  %load = load i32, ptr addrspace(4) %in, align 4
+  store i32 %load, ptr addrspace(1) %out, align 4
   ret void
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
index 355a72232d35..acad79d009a4 100644
--- a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX940 %s
 ; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX1100 %s
 
-define float @syncscope_system(float* %addr, float %val) #0 {
+define float @syncscope_system(ptr %addr, float %val) #0 {
 ; GFX908-LABEL: syncscope_system:
 ; GFX908:       ; %bb.0:
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -104,11 +104,11 @@ define float @syncscope_system(float* %addr, float %val) #0 {
 ; GFX1100-NEXT:    s_or_b32 exec_lo, exec_lo, s0
 ; GFX1100-NEXT:    v_mov_b32_e32 v0, v3
 ; GFX1100-NEXT:    s_setpc_b64 s[30:31]
-  %res = atomicrmw fadd float* %addr, float %val seq_cst
+  %res = atomicrmw fadd ptr %addr, float %val seq_cst
   ret float %res
 }
 
-define float @syncscope_workgroup_rtn(float* %addr, float %val) #0 {
+define float @syncscope_workgroup_rtn(ptr %addr, float %val) #0 {
 ; GFX908-LABEL: syncscope_workgroup_rtn:
 ; GFX908:       ; %bb.0:
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -132,7 +132,7 @@ define float @syncscope_workgroup_rtn(float* %addr, float %val) #0 {
 ; GFX908-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX90A-LABEL: syncscope_workgroup_rtn:
-; GFX90A:       ; %bb.0: ; %atomicrmw.check.shared
+; GFX90A:       ; %bb.0:
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    s_mov_b64 s[4:5], src_shared_base
 ; GFX90A-NEXT:    v_cmp_ne_u32_e32 vcc, s5, v1
@@ -196,11 +196,11 @@ define float @syncscope_workgroup_rtn(float* %addr, float %val) #0 {
 ; GFX1100-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX1100-NEXT:    buffer_gl0_inv
 ; GFX1100-NEXT:    s_setpc_b64 s[30:31]
-  %res = atomicrmw fadd float* %addr, float %val syncscope("workgroup") seq_cst
+  %res = atomicrmw fadd ptr %addr, float %val syncscope("workgroup") seq_cst
   ret float %res
 }
 
-define void @syncscope_workgroup_nortn(float* %addr, float %val) #0 {
+define void @syncscope_workgroup_nortn(ptr %addr, float %val) #0 {
 ; GFX908-LABEL: syncscope_workgroup_nortn:
 ; GFX908:       ; %bb.0: ; %atomicrmw.check.shared
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -321,11 +321,11 @@ define void @syncscope_workgroup_nortn(float* %addr, float %val) #0 {
 ; GFX1100-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX1100-NEXT:    buffer_gl0_inv
 ; GFX1100-NEXT:    s_setpc_b64 s[30:31]
-  %res = atomicrmw fadd float* %addr, float %val syncscope("workgroup") seq_cst
+  %res = atomicrmw fadd ptr %addr, float %val syncscope("workgroup") seq_cst
   ret void
 }
 
-define float @no_unsafe(float* %addr, float %val) {
+define float @no_unsafe(ptr %addr, float %val) {
 ; GFX908-LABEL: no_unsafe:
 ; GFX908:       ; %bb.0:
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -418,7 +418,7 @@ define float @no_unsafe(float* %addr, float %val) {
 ; GFX1100-NEXT:    s_or_b32 exec_lo, exec_lo, s0
 ; GFX1100-NEXT:    v_mov_b32_e32 v0, v3
 ; GFX1100-NEXT:    s_setpc_b64 s[30:31]
-  %res = atomicrmw fadd float* %addr, float %val syncscope("workgroup") seq_cst
+  %res = atomicrmw fadd ptr %addr, float %val syncscope("workgroup") seq_cst
   ret float %res
 }
 

diff  --git a/llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll b/llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
index ab7f9a1032b1..ed045107d354 100644
--- a/llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
@@ -7,29 +7,29 @@
 ; ALL: SGPRBlocks: 1
 ; ALL: NumSGPRsForWavesPerEU: 10
 define amdgpu_kernel void @max_10_sgprs() #0 {
-  %one = load volatile i32, i32 addrspace(4)* undef
-  %two = load volatile i32, i32 addrspace(4)* undef
-  %three = load volatile i32, i32 addrspace(4)* undef
-  %four = load volatile i32, i32 addrspace(4)* undef
-  %five = load volatile i32, i32 addrspace(4)* undef
-  %six = load volatile i32, i32 addrspace(4)* undef
-  %seven = load volatile i32, i32 addrspace(4)* undef
-  %eight = load volatile i32, i32 addrspace(4)* undef
-  %nine = load volatile i32, i32 addrspace(4)* undef
-  %ten = load volatile i32, i32 addrspace(4)* undef
-  %eleven = load volatile i32, i32 addrspace(4)* undef
+  %one = load volatile i32, ptr addrspace(4) undef
+  %two = load volatile i32, ptr addrspace(4) undef
+  %three = load volatile i32, ptr addrspace(4) undef
+  %four = load volatile i32, ptr addrspace(4) undef
+  %five = load volatile i32, ptr addrspace(4) undef
+  %six = load volatile i32, ptr addrspace(4) undef
+  %seven = load volatile i32, ptr addrspace(4) undef
+  %eight = load volatile i32, ptr addrspace(4) undef
+  %nine = load volatile i32, ptr addrspace(4) undef
+  %ten = load volatile i32, ptr addrspace(4) undef
+  %eleven = load volatile i32, ptr addrspace(4) undef
   call void asm sideeffect "", "s,s,s,s,s,s,s,s,s,s"(i32 %one, i32 %two, i32 %three, i32 %four, i32 %five, i32 %six, i32 %seven, i32 %eight, i32 %nine, i32 %ten)
-  store volatile i32 %one, i32 addrspace(1)* undef
-  store volatile i32 %two, i32 addrspace(1)* undef
-  store volatile i32 %three, i32 addrspace(1)* undef
-  store volatile i32 %four, i32 addrspace(1)* undef
-  store volatile i32 %five, i32 addrspace(1)* undef
-  store volatile i32 %six, i32 addrspace(1)* undef
-  store volatile i32 %seven, i32 addrspace(1)* undef
-  store volatile i32 %eight, i32 addrspace(1)* undef
-  store volatile i32 %nine, i32 addrspace(1)* undef
-  store volatile i32 %ten, i32 addrspace(1)* undef
-  store volatile i32 %eleven, i32 addrspace(1)* undef
+  store volatile i32 %one, ptr addrspace(1) undef
+  store volatile i32 %two, ptr addrspace(1) undef
+  store volatile i32 %three, ptr addrspace(1) undef
+  store volatile i32 %four, ptr addrspace(1) undef
+  store volatile i32 %five, ptr addrspace(1) undef
+  store volatile i32 %six, ptr addrspace(1) undef
+  store volatile i32 %seven, ptr addrspace(1) undef
+  store volatile i32 %eight, ptr addrspace(1) undef
+  store volatile i32 %nine, ptr addrspace(1) undef
+  store volatile i32 %ten, ptr addrspace(1) undef
+  store volatile i32 %eleven, ptr addrspace(1) undef
   ret void
 }
 
@@ -57,32 +57,32 @@ define amdgpu_kernel void @max_10_sgprs() #0 {
 ; creates an extra vreg to save/restore m0 which in a point of maximum register
 ; pressure would trigger an endless loop; the compiler aborts earlier with
 ; "Incomplete scavenging after 2nd pass" in practice.
-;define amdgpu_kernel void @max_12_sgprs_14_input_sgprs(i32 addrspace(1)* %out1,
-;                                        i32 addrspace(1)* %out2,
-;                                        i32 addrspace(1)* %out3,
-;                                        i32 addrspace(1)* %out4,
+;define amdgpu_kernel void @max_12_sgprs_14_input_sgprs(ptr addrspace(1) %out1,
+;                                        ptr addrspace(1) %out2,
+;                                        ptr addrspace(1) %out3,
+;                                        ptr addrspace(1) %out4,
 ;                                        i32 %one, i32 %two, i32 %three, i32 %four) #2 {
 ;  %x.0 = call i32 @llvm.amdgcn.workgroup.id.x()
 ;  %x.1 = call i32 @llvm.amdgcn.workgroup.id.y()
 ;  %x.2 = call i32 @llvm.amdgcn.workgroup.id.z()
 ;  %x.3 = call i64 @llvm.amdgcn.dispatch.id()
-;  %x.4 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-;  %x.5 = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-;  store volatile i32 0, i32* undef
+;  %x.4 = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+;  %x.5 = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+;  store volatile i32 0, ptr undef
 ;  br label %stores
 ;
 ;stores:
-;  store volatile i32 %x.0, i32 addrspace(1)* undef
-;  store volatile i32 %x.0, i32 addrspace(1)* undef
-;  store volatile i32 %x.0, i32 addrspace(1)* undef
-;  store volatile i64 %x.3, i64 addrspace(1)* undef
-;  store volatile i8 addrspace(4)* %x.4, i8 addrspace(4)* addrspace(1)* undef
-;  store volatile i8 addrspace(4)* %x.5, i8 addrspace(4)* addrspace(1)* undef
+;  store volatile i32 %x.0, ptr addrspace(1) undef
+;  store volatile i32 %x.0, ptr addrspace(1) undef
+;  store volatile i32 %x.0, ptr addrspace(1) undef
+;  store volatile i64 %x.3, ptr addrspace(1) undef
+;  store volatile ptr addrspace(4) %x.4, ptr addrspace(1) undef
+;  store volatile ptr addrspace(4) %x.5, ptr addrspace(1) undef
 ;
-;  store i32 %one, i32 addrspace(1)* %out1
-;  store i32 %two, i32 addrspace(1)* %out2
-;  store i32 %three, i32 addrspace(1)* %out3
-;  store i32 %four, i32 addrspace(1)* %out4
+;  store i32 %one, ptr addrspace(1) %out1
+;  store i32 %two, ptr addrspace(1) %out2
+;  store i32 %three, ptr addrspace(1) %out3
+;  store i32 %four, ptr addrspace(1) %out4
 ;  ret void
 ;}
 
@@ -94,27 +94,27 @@ define amdgpu_kernel void @max_10_sgprs() #0 {
 
 ; XALL: SGPRBlocks: 2
 ; XALL: NumSGPRsForWavesPerEU: 18
-;define amdgpu_kernel void @max_12_sgprs_12_input_sgprs(i32 addrspace(1)* %out1,
-;                                        i32 addrspace(1)* %out2,
-;                                        i32 addrspace(1)* %out3,
-;                                        i32 addrspace(1)* %out4,
+;define amdgpu_kernel void @max_12_sgprs_12_input_sgprs(ptr addrspace(1) %out1,
+;                                        ptr addrspace(1) %out2,
+;                                        ptr addrspace(1) %out3,
+;                                        ptr addrspace(1) %out4,
 ;                                        i32 %one, i32 %two, i32 %three, i32 %four) #2 {
-;  store volatile i32 0, i32* undef
+;  store volatile i32 0, ptr undef
 ;  %x.0 = call i32 @llvm.amdgcn.workgroup.id.x()
-;  store volatile i32 %x.0, i32 addrspace(1)* undef
+;  store volatile i32 %x.0, ptr addrspace(1) undef
 ;  %x.1 = call i32 @llvm.amdgcn.workgroup.id.y()
-;  store volatile i32 %x.0, i32 addrspace(1)* undef
+;  store volatile i32 %x.0, ptr addrspace(1) undef
 ;  %x.2 = call i32 @llvm.amdgcn.workgroup.id.z()
-;  store volatile i32 %x.0, i32 addrspace(1)* undef
+;  store volatile i32 %x.0, ptr addrspace(1) undef
 ;  %x.3 = call i64 @llvm.amdgcn.dispatch.id()
-;  store volatile i64 %x.3, i64 addrspace(1)* undef
-;  %x.4 = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-;  store volatile i8 addrspace(4)* %x.4, i8 addrspace(4)* addrspace(1)* undef
+;  store volatile i64 %x.3, ptr addrspace(1) undef
+;  %x.4 = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+;  store volatile ptr addrspace(4) %x.4, ptr addrspace(1) undef
 ;
-;  store i32 %one, i32 addrspace(1)* %out1
-;  store i32 %two, i32 addrspace(1)* %out2
-;  store i32 %three, i32 addrspace(1)* %out3
-;  store i32 %four, i32 addrspace(1)* %out4
+;  store i32 %one, ptr addrspace(1) %out1
+;  store i32 %two, ptr addrspace(1) %out2
+;  store i32 %three, ptr addrspace(1) %out3
+;  store i32 %four, ptr addrspace(1) %out4
 ;  ret void
 ;}
 
@@ -122,8 +122,8 @@ declare i32 @llvm.amdgcn.workgroup.id.x() #1
 declare i32 @llvm.amdgcn.workgroup.id.y() #1
 declare i32 @llvm.amdgcn.workgroup.id.z() #1
 declare i64 @llvm.amdgcn.dispatch.id() #1
-declare i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() #1
-declare i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #1
+declare ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() #1
+declare ptr addrspace(4) @llvm.amdgcn.queue.ptr() #1
 
 attributes #0 = { nounwind "amdgpu-num-sgpr"="14" }
 attributes #1 = { nounwind readnone }

diff  --git a/llvm/test/CodeGen/AMDGPU/cse-phi-incoming-val.ll b/llvm/test/CodeGen/AMDGPU/cse-phi-incoming-val.ll
index 5209b2bf7f3c..c98da8126474 100644
--- a/llvm/test/CodeGen/AMDGPU/cse-phi-incoming-val.ll
+++ b/llvm/test/CodeGen/AMDGPU/cse-phi-incoming-val.ll
@@ -21,8 +21,8 @@ bb3:                                              ; preds = %bb
   br i1 %tmp4, label %bb5, label %bb10
 
 bb5:                                              ; preds = %bb3
-  %tmp6 = getelementptr <{ [4294967295 x i32] }>, <{ [4294967295 x i32] }> addrspace(6)* null, i32 0, i32 0, i32 %arg
-  %tmp7 = load i32, i32 addrspace(6)* %tmp6
+  %tmp6 = getelementptr <{ [4294967295 x i32] }>, ptr addrspace(6) null, i32 0, i32 0, i32 %arg
+  %tmp7 = load i32, ptr addrspace(6) %tmp6
   %tmp8 = icmp eq i32 %tmp7, 1
   br i1 %tmp8, label %bb10, label %bb9
 

diff  --git a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
index bf09acedb01b..f6403d381bf7 100644
--- a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
@@ -12,15 +12,15 @@ define internal void @indirect() {
 define internal void @direct() {
 ; CHECK-LABEL: define {{[^@]+}}@direct
 ; CHECK-SAME: () #[[ATTR1:[0-9]+]] {
-; CHECK-NEXT:    [[FPTR:%.*]] = alloca void ()*, align 8, addrspace(5)
-; CHECK-NEXT:    store void ()* @indirect, void ()* addrspace(5)* [[FPTR]], align 8
-; CHECK-NEXT:    [[FP:%.*]] = load void ()*, void ()* addrspace(5)* [[FPTR]], align 8
+; CHECK-NEXT:    [[FPTR:%.*]] = alloca ptr, align 8, addrspace(5)
+; CHECK-NEXT:    store ptr @indirect, ptr addrspace(5) [[FPTR]], align 8
+; CHECK-NEXT:    [[FP:%.*]] = load ptr, ptr addrspace(5) [[FPTR]], align 8
 ; CHECK-NEXT:    call void [[FP]]()
 ; CHECK-NEXT:    ret void
 ;
-  %fptr = alloca void()*, addrspace(5)
-  store void()* @indirect, void()* addrspace(5)* %fptr
-  %fp = load void()*, void()* addrspace(5)* %fptr
+  %fptr = alloca ptr, addrspace(5)
+  store ptr @indirect, ptr addrspace(5) %fptr
+  %fp = load ptr, ptr addrspace(5) %fptr
   call void %fp()
   ret void
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
index 4ab4717d593f..4e7b68c38895 100644
--- a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
+++ b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
@@ -16,23 +16,23 @@ define internal void @indirect() {
 define amdgpu_kernel void @test_simple_indirect_call() #0 {
 ; AKF_GCN-LABEL: define {{[^@]+}}@test_simple_indirect_call
 ; AKF_GCN-SAME: () #[[ATTR0:[0-9]+]] {
-; AKF_GCN-NEXT:    [[FPTR:%.*]] = alloca void ()*, align 8, addrspace(5)
-; AKF_GCN-NEXT:    store void ()* @indirect, void ()* addrspace(5)* [[FPTR]], align 8
-; AKF_GCN-NEXT:    [[FP:%.*]] = load void ()*, void ()* addrspace(5)* [[FPTR]], align 8
+; AKF_GCN-NEXT:    [[FPTR:%.*]] = alloca ptr, align 8, addrspace(5)
+; AKF_GCN-NEXT:    store ptr @indirect, ptr addrspace(5) [[FPTR]], align 8
+; AKF_GCN-NEXT:    [[FP:%.*]] = load ptr, ptr addrspace(5) [[FPTR]], align 8
 ; AKF_GCN-NEXT:    call void [[FP]]()
 ; AKF_GCN-NEXT:    ret void
 ;
 ; ATTRIBUTOR_GCN-LABEL: define {{[^@]+}}@test_simple_indirect_call
 ; ATTRIBUTOR_GCN-SAME: () #[[ATTR1:[0-9]+]] {
-; ATTRIBUTOR_GCN-NEXT:    [[FPTR:%.*]] = alloca void ()*, align 8, addrspace(5)
-; ATTRIBUTOR_GCN-NEXT:    store void ()* @indirect, void ()* addrspace(5)* [[FPTR]], align 8
-; ATTRIBUTOR_GCN-NEXT:    [[FP:%.*]] = load void ()*, void ()* addrspace(5)* [[FPTR]], align 8
+; ATTRIBUTOR_GCN-NEXT:    [[FPTR:%.*]] = alloca ptr, align 8, addrspace(5)
+; ATTRIBUTOR_GCN-NEXT:    store ptr @indirect, ptr addrspace(5) [[FPTR]], align 8
+; ATTRIBUTOR_GCN-NEXT:    [[FP:%.*]] = load ptr, ptr addrspace(5) [[FPTR]], align 8
 ; ATTRIBUTOR_GCN-NEXT:    call void [[FP]]()
 ; ATTRIBUTOR_GCN-NEXT:    ret void
 ;
-  %fptr = alloca void()*, addrspace(5)
-  store void()* @indirect, void()* addrspace(5)* %fptr
-  %fp = load void()*, void()* addrspace(5)* %fptr
+  %fptr = alloca ptr, addrspace(5)
+  store ptr @indirect, ptr addrspace(5) %fptr
+  %fp = load ptr, ptr addrspace(5) %fptr
   call void %fp()
   ret void
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg-v3.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg-v3.ll
index 4faf1df78e8c..f5fc387f419b 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg-v3.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg-v3.ll
@@ -19,7 +19,7 @@
 ; CHECK:          .name:           test_ro_arg
 ; CHECK:          .symbol:         test_ro_arg.kd
 
-define amdgpu_kernel void @test_ro_arg(float addrspace(1)* noalias readonly %in, float addrspace(1)* %out)
+define amdgpu_kernel void @test_ro_arg(ptr addrspace(1) noalias readonly %in, ptr addrspace(1) %out)
     !kernel_arg_addr_space !0 !kernel_arg_access_qual !1 !kernel_arg_type !2
     !kernel_arg_base_type !2 !kernel_arg_type_qual !3 {
   ret void

diff  --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg.ll
index da46f0e91164..bb8abe35e388 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-deduce-ro-arg.ll
@@ -20,7 +20,7 @@
 ; CHECK-NEXT:   AddrSpaceQual:   Global
 ; CHECK-NEXT:   AccQual:         Default
 
-define amdgpu_kernel void @test_ro_arg(float addrspace(1)* noalias readonly %in, float addrspace(1)* %out)
+define amdgpu_kernel void @test_ro_arg(ptr addrspace(1) noalias readonly %in, ptr addrspace(1) %out)
     !kernel_arg_addr_space !0 !kernel_arg_access_qual !1 !kernel_arg_type !2
     !kernel_arg_base_type !2 !kernel_arg_type_qual !3 {
   ret void

diff  --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ctor-dtor-list.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ctor-dtor-list.ll
index 31ca10c2d361..3094fa1165fb 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ctor-dtor-list.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ctor-dtor-list.ll
@@ -9,12 +9,12 @@
 
 define internal void @foo() {
       ret void
-      
+
 }
 
 define internal void @foo.5() {
       ret void
-      
+
 }
 
 ; CHECK: ---
@@ -25,12 +25,12 @@ define internal void @foo.5() {
 
 define internal void @bar() {
       ret void
-      
+
 }
 
 define internal void @bar.5() {
       ret void
-      
+
 }
 
 ; CHECK: .kind: fini

diff  --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full-v3.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full-v3.ll
index 167c25436c5d..d9958947841f 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full-v3.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full-v3.ll
@@ -11,10 +11,10 @@
 %opencl.image3d_t = type opaque
 %opencl.queue_t = type opaque
 %opencl.pipe_t = type opaque
-%struct.B = type { i32 addrspace(1)*}
+%struct.B = type { ptr addrspace(1) }
 %opencl.clk_event_t = type opaque
 
-@__test_block_invoke_kernel_runtime_handle = external addrspace(1) externally_initialized constant i8 addrspace(1)*
+@__test_block_invoke_kernel_runtime_handle = external addrspace(1) externally_initialized constant ptr addrspace(1)
 
 ; CHECK:              ---
 ; CHECK-NEXT: amdhsa.kernels:
@@ -80,7 +80,7 @@ define amdgpu_kernel void @test_char(i8 %a) #0
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_char_byref_constant
 ; CHECK:          .symbol:         test_char_byref_constant.kd
-define amdgpu_kernel void @test_char_byref_constant(i8 addrspace(4)* byref(i8) %a) #0
+define amdgpu_kernel void @test_char_byref_constant(ptr addrspace(4) byref(i8) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !9
     !kernel_arg_base_type !9 !kernel_arg_type_qual !4 {
   ret void
@@ -118,7 +118,7 @@ define amdgpu_kernel void @test_char_byref_constant(i8 addrspace(4)* byref(i8) %
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_char_byref_constant_align512
 ; CHECK:          .symbol:         test_char_byref_constant_align512.kd
-define amdgpu_kernel void @test_char_byref_constant_align512(i8, i8 addrspace(4)* byref(i8) align(512) %a) #0
+define amdgpu_kernel void @test_char_byref_constant_align512(i8, ptr addrspace(4) byref(i8) align(512) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !111
     !kernel_arg_base_type !9 !kernel_arg_type_qual !4 {
   ret void
@@ -392,7 +392,7 @@ define amdgpu_kernel void @test_double16(<16 x double> %a) #0
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_pointer
 ; CHECK:          .symbol:         test_pointer.kd
-define amdgpu_kernel void @test_pointer(i32 addrspace(1)* %a) #0
+define amdgpu_kernel void @test_pointer(ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !16
     !kernel_arg_base_type !16 !kernel_arg_type_qual !4 {
   ret void
@@ -431,7 +431,7 @@ define amdgpu_kernel void @test_pointer(i32 addrspace(1)* %a) #0
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_image
 ; CHECK:          .symbol:         test_image.kd
-define amdgpu_kernel void @test_image(%opencl.image2d_t addrspace(1)* %a) #0
+define amdgpu_kernel void @test_image(ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !17
     !kernel_arg_base_type !17 !kernel_arg_type_qual !4 {
   ret void
@@ -509,7 +509,7 @@ define amdgpu_kernel void @test_sampler(i32 %a) #0
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_queue
 ; CHECK:          .symbol:         test_queue.kd
-define amdgpu_kernel void @test_queue(%opencl.queue_t addrspace(1)* %a) #0
+define amdgpu_kernel void @test_queue(ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !19
     !kernel_arg_base_type !19 !kernel_arg_type_qual !4 {
   ret void
@@ -587,7 +587,7 @@ define amdgpu_kernel void @test_struct(%struct.A %a) #0
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_struct_byref_constant
 ; CHECK:          .symbol:         test_struct_byref_constant.kd
-define amdgpu_kernel void @test_struct_byref_constant(%struct.A addrspace(4)* byref(%struct.A) %a) #0
+define amdgpu_kernel void @test_struct_byref_constant(ptr addrspace(4) byref(%struct.A) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !20
     !kernel_arg_base_type !20 !kernel_arg_type_qual !4 {
   ret void
@@ -665,7 +665,7 @@ define amdgpu_kernel void @test_array([32 x i8] %a) #0
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_array_byref_constant
 ; CHECK:          .symbol:         test_array_byref_constant.kd
-define amdgpu_kernel void @test_array_byref_constant([32 x i8] addrspace(4)* byref([32 x i8]) %a) #0
+define amdgpu_kernel void @test_array_byref_constant(ptr addrspace(4) byref([32 x i8]) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !20
     !kernel_arg_base_type !20 !kernel_arg_type_qual !4 {
   ret void
@@ -806,9 +806,9 @@ define amdgpu_kernel void @test_multi_arg(i32 %a, <2 x i16> %b, <3 x i8> %c) #0
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_addr_space
 ; CHECK:          .symbol:         test_addr_space.kd
-define amdgpu_kernel void @test_addr_space(i32 addrspace(1)* %g,
-                                           i32 addrspace(4)* %c,
-                                           i32 addrspace(3)* align 4 %l) #0
+define amdgpu_kernel void @test_addr_space(ptr addrspace(1) %g,
+                                           ptr addrspace(4) %c,
+                                           ptr addrspace(3) align 4 %l) #0
     !kernel_arg_addr_space !50 !kernel_arg_access_qual !23 !kernel_arg_type !51
     !kernel_arg_base_type !51 !kernel_arg_type_qual !25 {
   ret void
@@ -863,9 +863,9 @@ define amdgpu_kernel void @test_addr_space(i32 addrspace(1)* %g,
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_type_qual
 ; CHECK:          .symbol:         test_type_qual.kd
-define amdgpu_kernel void @test_type_qual(i32 addrspace(1)* %a,
-                                          i32 addrspace(1)* %b,
-                                          %opencl.pipe_t addrspace(1)* %c) #0
+define amdgpu_kernel void @test_type_qual(ptr addrspace(1) %a,
+                                          ptr addrspace(1) %b,
+                                          ptr addrspace(1) %c) #0
     !kernel_arg_addr_space !22 !kernel_arg_access_qual !23 !kernel_arg_type !51
     !kernel_arg_base_type !51 !kernel_arg_type_qual !70 {
   ret void
@@ -917,9 +917,9 @@ define amdgpu_kernel void @test_type_qual(i32 addrspace(1)* %a,
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_access_qual
 ; CHECK:          .symbol:         test_access_qual.kd
-define amdgpu_kernel void @test_access_qual(%opencl.image1d_t addrspace(1)* %ro,
-                                            %opencl.image2d_t addrspace(1)* %wo,
-                                            %opencl.image3d_t addrspace(1)* %rw) #0
+define amdgpu_kernel void @test_access_qual(ptr addrspace(1) %ro,
+                                            ptr addrspace(1) %wo,
+                                            ptr addrspace(1) %rw) #0
     !kernel_arg_addr_space !60 !kernel_arg_access_qual !61 !kernel_arg_type !62
     !kernel_arg_base_type !62 !kernel_arg_type_qual !25 {
   ret void
@@ -1329,7 +1329,7 @@ define amdgpu_kernel void @test_wgs_hint_vec_type_hint(i32 %a) #0
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_arg_ptr_to_ptr
 ; CHECK:          .symbol:         test_arg_ptr_to_ptr.kd
-define amdgpu_kernel void @test_arg_ptr_to_ptr(i32 addrspace(5)* addrspace(1)* %a) #0
+define amdgpu_kernel void @test_arg_ptr_to_ptr(ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !81 !kernel_arg_access_qual !2 !kernel_arg_type !80
     !kernel_arg_base_type !80 !kernel_arg_type_qual !4 {
   ret void
@@ -1407,7 +1407,7 @@ define amdgpu_kernel void @test_arg_struct_contains_ptr(%struct.B %a) #0
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_arg_vector_of_ptr
 ; CHECK:          .symbol:         test_arg_vector_of_ptr.kd
-define amdgpu_kernel void @test_arg_vector_of_ptr(<2 x i32 addrspace(1)*> %a) #0
+define amdgpu_kernel void @test_arg_vector_of_ptr(<2 x ptr addrspace(1)> %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !83
     !kernel_arg_base_type !83 !kernel_arg_type_qual !4 {
   ret void
@@ -1448,7 +1448,7 @@ define amdgpu_kernel void @test_arg_vector_of_ptr(<2 x i32 addrspace(1)*> %a) #0
 ; CHECK:          .name:           test_arg_unknown_builtin_type
 ; CHECK:          .symbol:         test_arg_unknown_builtin_type.kd
 define amdgpu_kernel void @test_arg_unknown_builtin_type(
-    %opencl.clk_event_t addrspace(1)* %a) #0
+    ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !81 !kernel_arg_access_qual !2 !kernel_arg_type !84
     !kernel_arg_base_type !84 !kernel_arg_type_qual !4 {
   ret void
@@ -1536,14 +1536,14 @@ define amdgpu_kernel void @test_arg_unknown_builtin_type(
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_pointee_align
 ; CHECK:          .symbol:         test_pointee_align.kd
-define amdgpu_kernel void @test_pointee_align(i64 addrspace(1)* %a,
-                                              i8 addrspace(3)* %b,
-                                              <2 x i8> addrspace(3)* align 2 %c,
-                                              <3 x i8> addrspace(3)* align 4 %d,
-                                              <4 x i8> addrspace(3)* align 4 %e,
-                                              <8 x i8> addrspace(3)* align 8 %f,
-                                              <16 x i8> addrspace(3)* align 16 %g,
-                                              {} addrspace(3)* %h) #0
+define amdgpu_kernel void @test_pointee_align(ptr addrspace(1) %a,
+                                              ptr addrspace(3) %b,
+                                              ptr addrspace(3) align 2 %c,
+                                              ptr addrspace(3) align 4 %d,
+                                              ptr addrspace(3) align 4 %e,
+                                              ptr addrspace(3) align 8 %f,
+                                              ptr addrspace(3) align 16 %g,
+                                              ptr addrspace(3) %h) #0
     !kernel_arg_addr_space !91 !kernel_arg_access_qual !92 !kernel_arg_type !93
     !kernel_arg_base_type !93 !kernel_arg_type_qual !94 {
   ret void
@@ -1631,14 +1631,14 @@ define amdgpu_kernel void @test_pointee_align(i64 addrspace(1)* %a,
 ; CHECK-NEXT:       - 0
 ; CHECK:          .name:           test_pointee_align_attribute
 ; CHECK:          .symbol:         test_pointee_align_attribute.kd
-define amdgpu_kernel void @test_pointee_align_attribute(i64 addrspace(1)* align 16 %a,
-                                                        i8 addrspace(3)* align 8 %b,
-                                                        <2 x i8> addrspace(3)* align 32 %c,
-                                                        <3 x i8> addrspace(3)* align 64 %d,
-                                                        <4 x i8> addrspace(3)* align 256 %e,
-                                                        <8 x i8> addrspace(3)* align 128 %f,
-                                                        <16 x i8> addrspace(3)* align 1024 %g,
-                                                        {} addrspace(3)* align 16 %h) #0
+define amdgpu_kernel void @test_pointee_align_attribute(ptr addrspace(1) align 16 %a,
+                                                        ptr addrspace(3) align 8 %b,
+                                                        ptr addrspace(3) align 32 %c,
+                                                        ptr addrspace(3) align 64 %d,
+                                                        ptr addrspace(3) align 256 %e,
+                                                        ptr addrspace(3) align 128 %f,
+                                                        ptr addrspace(3) align 1024 %g,
+                                                        ptr addrspace(3) align 16 %h) #0
     !kernel_arg_addr_space !91 !kernel_arg_access_qual !92 !kernel_arg_type !93
     !kernel_arg_base_type !93 !kernel_arg_type_qual !94 {
   ret void
@@ -1678,7 +1678,7 @@ define amdgpu_kernel void @test_pointee_align_attribute(i64 addrspace(1)* align
 ; CHECK:          .name:           __test_block_invoke_kernel
 ; CHECK:          .symbol:         __test_block_invoke_kernel.kd
 define amdgpu_kernel void @__test_block_invoke_kernel(
-    <{ i32, i32, i8*, i8 addrspace(1)*, i8 }> %arg) #1
+    <{ i32, i32, ptr, ptr addrspace(1), i8 }> %arg) #1
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !110
     !kernel_arg_base_type !110 !kernel_arg_type_qual !4 {
   ret void
@@ -1730,7 +1730,7 @@ define amdgpu_kernel void @test_enqueue_kernel_caller(i8 %a) #2
 ; CHECK-NEXT:         .value_kind:     global_buffer
 ; CHECK:          .name:           unknown_addrspace_kernarg
 ; CHECK:          .symbol:         unknown_addrspace_kernarg.kd
-define amdgpu_kernel void @unknown_addrspace_kernarg(i32 addrspace(12345)* %ptr) #0 {
+define amdgpu_kernel void @unknown_addrspace_kernarg(ptr addrspace(12345) %ptr) #0 {
   ret void
 }
 
@@ -1778,7 +1778,7 @@ attributes #2 = { optnone noinline "amdgpu-implicitarg-num-bytes"="56" "calls-en
 !29 = !{i8 undef, i32 1}
 !30 = !{i16 undef, i32 1}
 !31 = !{i64 undef, i32 1}
-!32 = !{i32  addrspace(5)*undef, i32 1}
+!32 = !{ptr  addrspace(5) undef, i32 1}
 !50 = !{i32 1, i32 2, i32 3}
 !51 = !{!"int  addrspace(5)*", !"int  addrspace(5)*", !"int  addrspace(5)*"}
 !60 = !{i32 1, i32 1, i32 1}

diff  --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full.ll
index f44681a6fcf6..93e1241cb851 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-from-llvm-ir-full.ll
@@ -11,10 +11,10 @@
 %opencl.image3d_t = type opaque
 %opencl.queue_t = type opaque
 %opencl.pipe_t = type opaque
-%struct.B = type { i32 addrspace(1)*}
+%struct.B = type { ptr addrspace(1)}
 %opencl.clk_event_t = type opaque
 
-@__test_block_invoke_kernel_runtime_handle = external addrspace(1) externally_initialized constant i8 addrspace(1)*
+@__test_block_invoke_kernel_runtime_handle = external addrspace(1) externally_initialized constant ptr addrspace(1)
 
 ; CHECK: ---
 ; CHECK:  Version: [ 1, 0 ]
@@ -87,7 +87,7 @@ define amdgpu_kernel void @test_char(i8 %a) #0
 ; CHECK-NOT:        ValueKind:     HiddenCompletionAction
 ; CHECK:            ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_char_byref_constant(i8 addrspace(4)* byref(i8) %a) #0
+define amdgpu_kernel void @test_char_byref_constant(ptr addrspace(4) byref(i8) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !9
     !kernel_arg_base_type !9 !kernel_arg_type_qual !4 {
   ret void
@@ -126,7 +126,7 @@ define amdgpu_kernel void @test_char_byref_constant(i8 addrspace(4)* byref(i8) %
 ; CHECK-NOT:        ValueKind:     HiddenCompletionAction
 ; CHECK:            ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_char_byref_constant_align512(i8, i8 addrspace(4)* byref(i8) align 512 %a) #0
+define amdgpu_kernel void @test_char_byref_constant_align512(i8, ptr addrspace(4) byref(i8) align 512 %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !111
     !kernel_arg_base_type !9 !kernel_arg_type_qual !4 {
   ret void
@@ -421,7 +421,7 @@ define amdgpu_kernel void @test_double16(<16 x double> %a) #0
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_pointer(i32 addrspace(1)* %a) #0
+define amdgpu_kernel void @test_pointer(ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !16
     !kernel_arg_base_type !16 !kernel_arg_type_qual !4 {
   ret void
@@ -464,7 +464,7 @@ define amdgpu_kernel void @test_pointer(i32 addrspace(1)* %a) #0
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_image(%opencl.image2d_t addrspace(1)* %a) #0
+define amdgpu_kernel void @test_image(ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !17
     !kernel_arg_base_type !17 !kernel_arg_type_qual !4 {
   ret void
@@ -549,7 +549,7 @@ define amdgpu_kernel void @test_sampler(i32 %a) #0
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_queue(%opencl.queue_t addrspace(1)* %a) #0
+define amdgpu_kernel void @test_queue(ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !19
     !kernel_arg_base_type !19 !kernel_arg_type_qual !4 {
   ret void
@@ -633,7 +633,7 @@ define amdgpu_kernel void @test_struct(%struct.A %a) #0
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_struct_byref_constant(%struct.A addrspace(4)* byref(%struct.A) %a) #0
+define amdgpu_kernel void @test_struct_byref_constant(ptr addrspace(4) byref(%struct.A) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !20
     !kernel_arg_base_type !20 !kernel_arg_type_qual !4 {
   ret void
@@ -717,7 +717,7 @@ define amdgpu_kernel void @test_array([8 x i8] %a) #0
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_array_byref_constant([8 x i8] addrspace(4)* byref([8 x i8]) %a) #0
+define amdgpu_kernel void @test_array_byref_constant(ptr addrspace(4) byref([8 x i8]) %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !20
     !kernel_arg_base_type !20 !kernel_arg_type_qual !4 {
   ret void
@@ -871,9 +871,9 @@ define amdgpu_kernel void @test_multi_arg(i32 %a, <2 x i16> %b, <3 x i8> %c) #0
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_addr_space(i32 addrspace(1)* %g,
-                                           i32 addrspace(4)* %c,
-                                           i32 addrspace(3)* align 4 %l) #0
+define amdgpu_kernel void @test_addr_space(ptr addrspace(1) %g,
+                                           ptr addrspace(4) %c,
+                                           ptr addrspace(3) align 4 %l) #0
     !kernel_arg_addr_space !50 !kernel_arg_access_qual !23 !kernel_arg_type !51
     !kernel_arg_base_type !51 !kernel_arg_type_qual !25 {
   ret void
@@ -934,9 +934,9 @@ define amdgpu_kernel void @test_addr_space(i32 addrspace(1)* %g,
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_type_qual(i32 addrspace(1)* %a,
-                                          i32 addrspace(1)* %b,
-                                          %opencl.pipe_t addrspace(1)* %c) #0
+define amdgpu_kernel void @test_type_qual(ptr addrspace(1) %a,
+                                          ptr addrspace(1) %b,
+                                          ptr addrspace(1) %c) #0
     !kernel_arg_addr_space !22 !kernel_arg_access_qual !23 !kernel_arg_type !51
     !kernel_arg_base_type !51 !kernel_arg_type_qual !70 {
   ret void
@@ -993,9 +993,9 @@ define amdgpu_kernel void @test_type_qual(i32 addrspace(1)* %a,
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_access_qual(%opencl.image1d_t addrspace(1)* %ro,
-                                            %opencl.image2d_t addrspace(1)* %wo,
-                                            %opencl.image3d_t addrspace(1)* %rw) #0
+define amdgpu_kernel void @test_access_qual(ptr addrspace(1) %ro,
+                                            ptr addrspace(1) %wo,
+                                            ptr addrspace(1) %rw) #0
     !kernel_arg_addr_space !60 !kernel_arg_access_qual !61 !kernel_arg_type !62
     !kernel_arg_base_type !62 !kernel_arg_type_qual !25 {
   ret void
@@ -1438,7 +1438,7 @@ define amdgpu_kernel void @test_wgs_hint_vec_type_hint(i32 %a) #0
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_arg_ptr_to_ptr(i32 addrspace(5)* addrspace(1)* %a) #0
+define amdgpu_kernel void @test_arg_ptr_to_ptr(ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !81 !kernel_arg_access_qual !2 !kernel_arg_type !80
     !kernel_arg_base_type !80 !kernel_arg_type_qual !4 {
   ret void
@@ -1510,7 +1510,7 @@ define amdgpu_kernel void @test_arg_struct_contains_ptr(%struct.B %a) #0
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_arg_vector_of_ptr(<2 x i32 addrspace(1)*> %a) #0
+define amdgpu_kernel void @test_arg_vector_of_ptr(<2 x ptr addrspace(1)> %a) #0
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !83
     !kernel_arg_base_type !83 !kernel_arg_type_qual !4 {
   ret void
@@ -1554,7 +1554,7 @@ define amdgpu_kernel void @test_arg_vector_of_ptr(<2 x i32 addrspace(1)*> %a) #0
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
 define amdgpu_kernel void @test_arg_unknown_builtin_type(
-    %opencl.clk_event_t addrspace(1)* %a) #0
+    ptr addrspace(1) %a) #0
     !kernel_arg_addr_space !81 !kernel_arg_access_qual !2 !kernel_arg_type !84
     !kernel_arg_base_type !84 !kernel_arg_type_qual !4 {
   ret void
@@ -1651,14 +1651,14 @@ define amdgpu_kernel void @test_arg_unknown_builtin_type(
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_pointee_align(i64 addrspace(1)* %a,
-                                              i8 addrspace(3)* %b,
-                                              <2 x i8> addrspace(3)* align 2 %c,
-                                              <3 x i8> addrspace(3)* align 4 %d,
-                                              <4 x i8> addrspace(3)* align 4 %e,
-                                              <8 x i8> addrspace(3)* align 8 %f,
-                                              <16 x i8> addrspace(3)* align 16 %g,
-                                              {} addrspace(3)* %h) #0
+define amdgpu_kernel void @test_pointee_align(ptr addrspace(1) %a,
+                                              ptr addrspace(3) %b,
+                                              ptr addrspace(3) align 2 %c,
+                                              ptr addrspace(3) align 4 %d,
+                                              ptr addrspace(3) align 4 %e,
+                                              ptr addrspace(3) align 8 %f,
+                                              ptr addrspace(3) align 16 %g,
+                                              ptr addrspace(3) %h) #0
     !kernel_arg_addr_space !91 !kernel_arg_access_qual !92 !kernel_arg_type !93
     !kernel_arg_base_type !93 !kernel_arg_type_qual !94 {
   ret void
@@ -1755,14 +1755,14 @@ define amdgpu_kernel void @test_pointee_align(i64 addrspace(1)* %a,
 ; CHECK-NEXT:       Align:         8
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
-define amdgpu_kernel void @test_pointee_align_attribute(i64 addrspace(1)* align 16 %a,
-                                                        i8 addrspace(3)* align 8 %b,
-                                                        <2 x i8> addrspace(3)* align 32 %c,
-                                                        <3 x i8> addrspace(3)* align 64 %d,
-                                                        <4 x i8> addrspace(3)* align 256 %e,
-                                                        <8 x i8> addrspace(3)* align 128 %f,
-                                                        <16 x i8> addrspace(3)* align 1024 %g,
-                                                        {} addrspace(3)* align 16 %h) #0
+define amdgpu_kernel void @test_pointee_align_attribute(ptr addrspace(1) align 16 %a,
+                                                        ptr addrspace(3) align 8 %b,
+                                                        ptr addrspace(3) align 32 %c,
+                                                        ptr addrspace(3) align 64 %d,
+                                                        ptr addrspace(3) align 256 %e,
+                                                        ptr addrspace(3) align 128 %f,
+                                                        ptr addrspace(3) align 1024 %g,
+                                                        ptr addrspace(3) align 16 %h) #0
     !kernel_arg_addr_space !91 !kernel_arg_access_qual !92 !kernel_arg_type !93
     !kernel_arg_base_type !93 !kernel_arg_type_qual !94 {
   ret void
@@ -1808,7 +1808,7 @@ define amdgpu_kernel void @test_pointee_align_attribute(i64 addrspace(1)* align
 ; CHECK-NEXT:       ValueKind:     HiddenMultiGridSyncArg
 ; CHECK-NEXT:       AddrSpaceQual: Global
 define amdgpu_kernel void @__test_block_invoke_kernel(
-    <{ i32, i32, i8*, i8 addrspace(1)*, i8 }> %arg) #1
+    <{ i32, i32, ptr, ptr addrspace(1), i8 }> %arg) #1
     !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !110
     !kernel_arg_base_type !110 !kernel_arg_type_qual !4 {
   ret void
@@ -1862,7 +1862,7 @@ define amdgpu_kernel void @test_enqueue_kernel_caller(i8 %a) #2
 ; CHECK-NEXT: Size:            8
 ; CHECK-NEXT: Align:           8
 ; CHECK-NEXT: ValueKind:       GlobalBuffer
-define amdgpu_kernel void @unknown_addrspace_kernarg(i32 addrspace(12345)* %ptr) #0 {
+define amdgpu_kernel void @unknown_addrspace_kernarg(ptr addrspace(12345) %ptr) #0 {
   ret void
 }
 
@@ -1903,7 +1903,7 @@ attributes #2 = { optnone noinline "amdgpu-implicitarg-num-bytes"="56" "calls-en
 !29 = !{i8 undef, i32 1}
 !30 = !{i16 undef, i32 1}
 !31 = !{i64 undef, i32 1}
-!32 = !{i32  addrspace(5)*undef, i32 1}
+!32 = !{ptr addrspace(5) undef, i32 1}
 !50 = !{i32 1, i32 2, i32 3}
 !51 = !{!"int  addrspace(5)*", !"int  addrspace(5)*", !"int  addrspace(5)*"}
 !60 = !{i32 1, i32 1, i32 1}

diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-heap-v5.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-heap-v5.ll
index ec883d860d04..91864c54ce11 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-heap-v5.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-heap-v5.ll
@@ -6,35 +6,34 @@ declare void @function1()
 declare void @function2() #0
 
 ; Function Attrs: noinline
-define void @function3(i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  store i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink, align 8
+define void @function3(ptr addrspace(4) %argptr, ptr addrspace(1) %sink) #2 {
+  store ptr addrspace(4) %argptr, ptr addrspace(1) %sink, align 8
   ret void
 }
 
 ; Function Attrs: noinline
-define void @function4(i64 %arg, i64* %a) #2 {
-  store i64 %arg, i64* %a
+define void @function4(i64 %arg, ptr %a) #2 {
+  store i64 %arg, ptr %a
   ret void
 }
 
 ; Function Attrs: noinline
-define void @function5(i8 addrspace(4)* %ptr, i64* %sink) #2 {
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 64
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %sink
+define void @function5(ptr addrspace(4) %ptr, ptr %sink) #2 {
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 64
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %sink
   ret void
 }
 
 ; Function Attrs: nounwind readnone speculatable willreturn
-declare align 4 i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #1
+declare align 4 ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #1
 
 ; CHECK: amdhsa.kernels:
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel10
-define amdgpu_kernel void @test_kernel10(i8* %a) {
-  store i8 3, i8* %a, align 1
+define amdgpu_kernel void @test_kernel10(ptr %a) {
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -43,9 +42,9 @@ define amdgpu_kernel void @test_kernel10(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel20
-define amdgpu_kernel void @test_kernel20(i8* %a) {
+define amdgpu_kernel void @test_kernel20(ptr %a) {
   call void @function1()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -54,9 +53,9 @@ define amdgpu_kernel void @test_kernel20(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel21
-define amdgpu_kernel void @test_kernel21(i8* %a) #0 {
+define amdgpu_kernel void @test_kernel21(ptr %a) #0 {
   call void @function1()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -65,9 +64,9 @@ define amdgpu_kernel void @test_kernel21(i8* %a) #0 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel22
-define amdgpu_kernel void @test_kernel22(i8* %a) {
+define amdgpu_kernel void @test_kernel22(ptr %a) {
   call void @function2()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -76,12 +75,11 @@ define amdgpu_kernel void @test_kernel22(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel30
-define amdgpu_kernel void @test_kernel30(i128* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 88
-  %cast = bitcast i8 addrspace(4)* %gep to i128 addrspace(4)*
-  %x = load i128, i128 addrspace(4)* %cast
-  store i128 %x, i128* %a
+define amdgpu_kernel void @test_kernel30(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 88
+  %x = load i128, ptr addrspace(4) %gep
+  store i128 %x, ptr %a
   ret void
 }
 
@@ -90,12 +88,11 @@ define amdgpu_kernel void @test_kernel30(i128* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel40
-define amdgpu_kernel void @test_kernel40(i64* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 96
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel40(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 96
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -104,12 +101,11 @@ define amdgpu_kernel void @test_kernel40(i64* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel41
-define amdgpu_kernel void @test_kernel41(i64* %a) #0 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 96
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel41(ptr %a) #0 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 96
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -118,12 +114,11 @@ define amdgpu_kernel void @test_kernel41(i64* %a) #0 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel42
-define amdgpu_kernel void @test_kernel42(i64* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 88
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel42(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 88
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -132,12 +127,11 @@ define amdgpu_kernel void @test_kernel42(i64* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel43
-define amdgpu_kernel void @test_kernel43(i64* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 104
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel43(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 104
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -146,11 +140,11 @@ define amdgpu_kernel void @test_kernel43(i64* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel44
-define amdgpu_kernel void @test_kernel44(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 95
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel44(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 95
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -159,11 +153,11 @@ define amdgpu_kernel void @test_kernel44(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel45
-define amdgpu_kernel void @test_kernel45(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 96
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel45(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 96
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -172,11 +166,11 @@ define amdgpu_kernel void @test_kernel45(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel46
-define amdgpu_kernel void @test_kernel46(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 103
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel46(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 103
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -185,11 +179,11 @@ define amdgpu_kernel void @test_kernel46(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel47
-define amdgpu_kernel void @test_kernel47(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 104
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel47(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 104
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -198,11 +192,11 @@ define amdgpu_kernel void @test_kernel47(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel50
-define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 %b
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel50(ptr %a, i32 %b) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 %b
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -211,12 +205,12 @@ define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel51
-define amdgpu_kernel void @test_kernel51(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 80
-  %x = load i8, i8 addrspace(4)* %gep2, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel51(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep1 = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %gep2 = getelementptr inbounds i8, ptr addrspace(4) %gep1, i64 80
+  %x = load i8, ptr addrspace(4) %gep2, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -225,12 +219,12 @@ define amdgpu_kernel void @test_kernel51(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel52
-define amdgpu_kernel void @test_kernel52(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 16
-  %x = load i8, i8 addrspace(4)* %gep2, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel52(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep1 = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %gep2 = getelementptr inbounds i8, ptr addrspace(4) %gep1, i64 16
+  %x = load i8, ptr addrspace(4) %gep2, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -239,12 +233,11 @@ define amdgpu_kernel void @test_kernel52(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel60
-define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 96
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  call void @function4(i64 %x, i64* %a)
+define amdgpu_kernel void @test_kernel60(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 96
+  %x = load i64, ptr addrspace(4) %gep
+  call void @function4(i64 %x, ptr %a)
   ret void
 }
 
@@ -253,10 +246,10 @@ define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel61
-define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 32
-  call void @function5(i8 addrspace(4)* %gep, i64* %a)
+define amdgpu_kernel void @test_kernel61(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 32
+  call void @function5(ptr addrspace(4) %gep, ptr %a)
   ret void
 }
 
@@ -265,10 +258,10 @@ define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel70
-define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink, align 8
+define amdgpu_kernel void @test_kernel70(ptr addrspace(1) %sink) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  store ptr addrspace(4) %gep, ptr addrspace(1) %sink, align 8
   ret void
 }
 
@@ -277,10 +270,10 @@ define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #
 ; CHECK:  - .args:
 ; CHECK: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel71
-define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  call void @function3(i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink)
+define amdgpu_kernel void @test_kernel71(ptr addrspace(1) %sink) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  call void @function3(ptr addrspace(4) %gep, ptr addrspace(1) %sink)
   ret void
 }
 
@@ -290,9 +283,9 @@ define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #
 ; CHECK-NOT: hidden_heap_v1
 ; CHECK-LABEL:    .name:           test_kernel72
 define amdgpu_kernel void @test_kernel72() #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* undef, align 8
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  store ptr addrspace(4) %gep, ptr addrspace(1) undef, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v3.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v3.ll
index 734ad53e9be0..140b67cb2f25 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v3.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v3.ll
@@ -6,35 +6,34 @@ declare void @function1()
 declare void @function2() #0
 
 ; Function Attrs: noinline
-define void @function3(i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink) #4 {
-  store i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink, align 8
+define void @function3(ptr addrspace(4) %argptr, ptr addrspace(1) %sink) #4 {
+  store ptr addrspace(4) %argptr, ptr addrspace(1) %sink, align 8
   ret void
 }
 
 ; Function Attrs: noinline
-define void @function4(i64 %arg, i64* %a) #4 {
-  store i64 %arg, i64* %a
+define void @function4(i64 %arg, ptr %a) #4 {
+  store i64 %arg, ptr %a
   ret void
 }
 
 ; Function Attrs: noinline
-define void @function5(i8 addrspace(4)* %ptr, i64* %sink) #4 {
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 8
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %sink
+define void @function5(ptr addrspace(4) %ptr, ptr %sink) #4 {
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 8
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %sink
   ret void
 }
 
 ; Function Attrs: nounwind readnone speculatable willreturn
-declare align 4 i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #1
+declare align 4 ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #1
 
 ; CHECK: amdhsa.kernels:
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel10
-define amdgpu_kernel void @test_kernel10(i8* %a) #2 {
-  store i8 3, i8* %a, align 1
+define amdgpu_kernel void @test_kernel10(ptr %a) #2 {
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -43,9 +42,9 @@ define amdgpu_kernel void @test_kernel10(i8* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel20
-define amdgpu_kernel void @test_kernel20(i8* %a) #2 {
+define amdgpu_kernel void @test_kernel20(ptr %a) #2 {
   call void @function1()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -54,9 +53,9 @@ define amdgpu_kernel void @test_kernel20(i8* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel21
-define amdgpu_kernel void @test_kernel21(i8* %a) #3 {
+define amdgpu_kernel void @test_kernel21(ptr %a) #3 {
   call void @function1()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -65,9 +64,9 @@ define amdgpu_kernel void @test_kernel21(i8* %a) #3 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel22
-define amdgpu_kernel void @test_kernel22(i8* %a) #2 {
+define amdgpu_kernel void @test_kernel22(ptr %a) #2 {
   call void @function2()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -76,12 +75,11 @@ define amdgpu_kernel void @test_kernel22(i8* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel30
-define amdgpu_kernel void @test_kernel30(i128* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %cast = bitcast i8 addrspace(4)* %gep to i128 addrspace(4)*
-  %x = load i128, i128 addrspace(4)* %cast
-  store i128 %x, i128* %a
+define amdgpu_kernel void @test_kernel30(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %x = load i128, ptr addrspace(4) %gep
+  store i128 %x, ptr %a
   ret void
 }
 
@@ -90,12 +88,11 @@ define amdgpu_kernel void @test_kernel30(i128* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel40
-define amdgpu_kernel void @test_kernel40(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 24
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel40(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 24
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -104,12 +101,11 @@ define amdgpu_kernel void @test_kernel40(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel41
-define amdgpu_kernel void @test_kernel41(i64* %a) #3 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 24
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel41(ptr %a) #3 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 24
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -118,12 +114,11 @@ define amdgpu_kernel void @test_kernel41(i64* %a) #3 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel42
-define amdgpu_kernel void @test_kernel42(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel42(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -132,12 +127,11 @@ define amdgpu_kernel void @test_kernel42(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel43
-define amdgpu_kernel void @test_kernel43(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 32
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel43(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 32
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -146,11 +140,11 @@ define amdgpu_kernel void @test_kernel43(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel44
-define amdgpu_kernel void @test_kernel44(i8* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 23
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel44(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 23
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -159,11 +153,11 @@ define amdgpu_kernel void @test_kernel44(i8* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel45
-define amdgpu_kernel void @test_kernel45(i8* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 24
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel45(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 24
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -172,11 +166,11 @@ define amdgpu_kernel void @test_kernel45(i8* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel46
-define amdgpu_kernel void @test_kernel46(i8* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 31
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel46(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 31
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -185,11 +179,11 @@ define amdgpu_kernel void @test_kernel46(i8* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel47
-define amdgpu_kernel void @test_kernel47(i8* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 32
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel47(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 32
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -198,11 +192,11 @@ define amdgpu_kernel void @test_kernel47(i8* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel50
-define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 %b
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel50(ptr %a, i32 %b) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 %b
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -211,12 +205,12 @@ define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel51
-define amdgpu_kernel void @test_kernel51(i8* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 8
-  %x = load i8, i8 addrspace(4)* %gep2, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel51(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep1 = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %gep2 = getelementptr inbounds i8, ptr addrspace(4) %gep1, i64 8
+  %x = load i8, ptr addrspace(4) %gep2, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -225,12 +219,12 @@ define amdgpu_kernel void @test_kernel51(i8* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel52
-define amdgpu_kernel void @test_kernel52(i8* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 16
-  %x = load i8, i8 addrspace(4)* %gep2, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel52(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep1 = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %gep2 = getelementptr inbounds i8, ptr addrspace(4) %gep1, i64 16
+  %x = load i8, ptr addrspace(4) %gep2, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -239,12 +233,11 @@ define amdgpu_kernel void @test_kernel52(i8* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel60
-define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 24
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  call void @function4(i64 %x, i64* %a)
+define amdgpu_kernel void @test_kernel60(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 24
+  %x = load i64, ptr addrspace(4) %gep
+  call void @function4(i64 %x, ptr %a)
   ret void
 }
 
@@ -253,10 +246,10 @@ define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel61
-define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  call void @function5(i8 addrspace(4)* %gep, i64* %a)
+define amdgpu_kernel void @test_kernel61(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  call void @function5(ptr addrspace(4) %gep, ptr %a)
   ret void
 }
 
@@ -265,10 +258,10 @@ define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel70
-define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink, align 8
+define amdgpu_kernel void @test_kernel70(ptr addrspace(1) %sink) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  store ptr addrspace(4) %gep, ptr addrspace(1) %sink, align 8
   ret void
 }
 
@@ -277,10 +270,10 @@ define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel71
-define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  call void @function3(i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink)
+define amdgpu_kernel void @test_kernel71(ptr addrspace(1) %sink) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  call void @function3(ptr addrspace(4) %gep, ptr addrspace(1) %sink)
   ret void
 }
 
@@ -290,9 +283,9 @@ define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel72
 define amdgpu_kernel void @test_kernel72() #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* undef, align 8
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  store ptr addrspace(4) %gep, ptr addrspace(1) undef, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll
index a832ca1d60aa..abe9d881eacd 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll
@@ -6,35 +6,34 @@ declare void @function1()
 declare void @function2() #0
 
 ; Function Attrs: noinline
-define void @function3(i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  store i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink, align 8
+define void @function3(ptr addrspace(4) %argptr, ptr addrspace(1) %sink) #2 {
+  store ptr addrspace(4) %argptr, ptr addrspace(1) %sink, align 8
   ret void
 }
 
 ; Function Attrs: noinline
-define void @function4(i64 %arg, i64* %a) #2 {
-  store i64 %arg, i64* %a
+define void @function4(i64 %arg, ptr %a) #2 {
+  store i64 %arg, ptr %a
   ret void
 }
 
 ; Function Attrs: noinline
-define void @function5(i8 addrspace(4)* %ptr, i64* %sink) #2 {
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 64
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %sink
+define void @function5(ptr addrspace(4) %ptr, ptr %sink) #2 {
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 64
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %sink
   ret void
 }
 
 ; Function Attrs: nounwind readnone speculatable willreturn
-declare align 4 i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #1
+declare align 4 ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #1
 
 ; CHECK: amdhsa.kernels:
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel10
-define amdgpu_kernel void @test_kernel10(i8* %a) {
-  store i8 3, i8* %a, align 1
+define amdgpu_kernel void @test_kernel10(ptr %a) {
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -43,9 +42,9 @@ define amdgpu_kernel void @test_kernel10(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel20
-define amdgpu_kernel void @test_kernel20(i8* %a) {
+define amdgpu_kernel void @test_kernel20(ptr %a) {
   call void @function1()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -54,9 +53,9 @@ define amdgpu_kernel void @test_kernel20(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel21
-define amdgpu_kernel void @test_kernel21(i8* %a) #0 {
+define amdgpu_kernel void @test_kernel21(ptr %a) #0 {
   call void @function1()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -65,9 +64,9 @@ define amdgpu_kernel void @test_kernel21(i8* %a) #0 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel22
-define amdgpu_kernel void @test_kernel22(i8* %a) {
+define amdgpu_kernel void @test_kernel22(ptr %a) {
   call void @function2()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -76,12 +75,11 @@ define amdgpu_kernel void @test_kernel22(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel30
-define amdgpu_kernel void @test_kernel30(i128* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 72
-  %cast = bitcast i8 addrspace(4)* %gep to i128 addrspace(4)*
-  %x = load i128, i128 addrspace(4)* %cast
-  store i128 %x, i128* %a
+define amdgpu_kernel void @test_kernel30(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 72
+  %x = load i128, ptr addrspace(4) %gep
+  store i128 %x, ptr %a
   ret void
 }
 
@@ -90,12 +88,11 @@ define amdgpu_kernel void @test_kernel30(i128* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel40
-define amdgpu_kernel void @test_kernel40(i64* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 80
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel40(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 80
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -104,12 +101,11 @@ define amdgpu_kernel void @test_kernel40(i64* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel41
-define amdgpu_kernel void @test_kernel41(i64* %a) #0 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 80
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel41(ptr %a) #0 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 80
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -118,12 +114,11 @@ define amdgpu_kernel void @test_kernel41(i64* %a) #0 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel42
-define amdgpu_kernel void @test_kernel42(i64* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 72
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel42(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 72
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -132,12 +127,11 @@ define amdgpu_kernel void @test_kernel42(i64* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel43
-define amdgpu_kernel void @test_kernel43(i64* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 88
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel43(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 88
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -146,11 +140,11 @@ define amdgpu_kernel void @test_kernel43(i64* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel44
-define amdgpu_kernel void @test_kernel44(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 79
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel44(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 79
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -159,11 +153,11 @@ define amdgpu_kernel void @test_kernel44(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel45
-define amdgpu_kernel void @test_kernel45(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 80
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel45(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 80
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -172,11 +166,11 @@ define amdgpu_kernel void @test_kernel45(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel46
-define amdgpu_kernel void @test_kernel46(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 87
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel46(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 87
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -185,11 +179,11 @@ define amdgpu_kernel void @test_kernel46(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel47
-define amdgpu_kernel void @test_kernel47(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 88
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel47(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 88
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -198,11 +192,11 @@ define amdgpu_kernel void @test_kernel47(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel50
-define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 %b
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel50(ptr %a, i32 %b) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 %b
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -211,12 +205,12 @@ define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel51
-define amdgpu_kernel void @test_kernel51(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 64
-  %x = load i8, i8 addrspace(4)* %gep2, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel51(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep1 = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %gep2 = getelementptr inbounds i8, ptr addrspace(4) %gep1, i64 64
+  %x = load i8, ptr addrspace(4) %gep2, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -225,12 +219,12 @@ define amdgpu_kernel void @test_kernel51(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel52
-define amdgpu_kernel void @test_kernel52(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 16
-  %x = load i8, i8 addrspace(4)* %gep2, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel52(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep1 = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %gep2 = getelementptr inbounds i8, ptr addrspace(4) %gep1, i64 16
+  %x = load i8, ptr addrspace(4) %gep2, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -239,12 +233,11 @@ define amdgpu_kernel void @test_kernel52(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel60
-define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 80
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  call void @function4(i64 %x, i64* %a)
+define amdgpu_kernel void @test_kernel60(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 80
+  %x = load i64, ptr addrspace(4) %gep
+  call void @function4(i64 %x, ptr %a)
   ret void
 }
 
@@ -253,10 +246,10 @@ define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel61
-define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  call void @function5(i8 addrspace(4)* %gep, i64* %a)
+define amdgpu_kernel void @test_kernel61(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  call void @function5(ptr addrspace(4) %gep, ptr %a)
   ret void
 }
 
@@ -265,10 +258,10 @@ define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel70
-define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink, align 8
+define amdgpu_kernel void @test_kernel70(ptr addrspace(1) %sink) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  store ptr addrspace(4) %gep, ptr addrspace(1) %sink, align 8
   ret void
 }
 
@@ -277,10 +270,10 @@ define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #
 ; CHECK:  - .args:
 ; CHECK: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel71
-define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  call void @function3(i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink)
+define amdgpu_kernel void @test_kernel71(ptr addrspace(1) %sink) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  call void @function3(ptr addrspace(4) %gep, ptr addrspace(1) %sink)
   ret void
 }
 
@@ -290,9 +283,9 @@ define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #
 ; CHECK-NOT: hidden_hostcall_buffer
 ; CHECK-LABEL:    .name:           test_kernel72
 define amdgpu_kernel void @test_kernel72() #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* undef, align 8
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  store ptr addrspace(4) %gep, ptr addrspace(1) undef, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-queue-ptr-v5.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-queue-ptr-v5.ll
index 6e46dba419b8..f00f4ff4879f 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-queue-ptr-v5.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-queue-ptr-v5.ll
@@ -12,11 +12,11 @@
 ; PRE-GFX9:		hidden_queue_ptr
 ; GFX9-NOT:		hidden_queue_ptr
 ; CHECK-LABEL:		.name:           addrspacecast_requires_queue_ptr
-define amdgpu_kernel void @addrspacecast_requires_queue_ptr(i32 addrspace(5)* %ptr.private, i32 addrspace(3)* %ptr.local) {
-  %flat.private = addrspacecast i32 addrspace(5)* %ptr.private to i32*
-  %flat.local = addrspacecast i32 addrspace(3)* %ptr.local to i32*
-  store volatile i32 1, i32* %flat.private
-  store volatile i32 2, i32* %flat.local
+define amdgpu_kernel void @addrspacecast_requires_queue_ptr(ptr addrspace(5) %ptr.private, ptr addrspace(3) %ptr.local) {
+  %flat.private = addrspacecast ptr addrspace(5) %ptr.private to ptr
+  %flat.local = addrspacecast ptr addrspace(3) %ptr.local to ptr
+  store volatile i32 1, ptr %flat.private
+  store volatile i32 2, ptr %flat.local
   ret void
 }
 
@@ -24,10 +24,10 @@ define amdgpu_kernel void @addrspacecast_requires_queue_ptr(i32 addrspace(5)* %p
 ; PRE-GFX9:		hidden_shared_base
 ; GFX9-NOT:		hidden_shared_base
 ; CHECK-LABEL:		.name:          is_shared_requires_queue_ptr
-define amdgpu_kernel void @is_shared_requires_queue_ptr(i8* %ptr) {
-  %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+define amdgpu_kernel void @is_shared_requires_queue_ptr(ptr %ptr) {
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %ptr)
   %zext = zext i1 %is.shared to i32
-  store volatile i32 %zext, i32 addrspace(1)* undef
+  store volatile i32 %zext, ptr addrspace(1) undef
   ret void
 }
 
@@ -35,10 +35,10 @@ define amdgpu_kernel void @is_shared_requires_queue_ptr(i8* %ptr) {
 ; PRE-GFX9:		hidden_private_base
 ; GFX9-NOT:		hidden_private_base
 ; CHECK-LABEL:		.name:           is_private_requires_queue_ptr
-define amdgpu_kernel void @is_private_requires_queue_ptr(i8* %ptr) {
-  %is.private = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+define amdgpu_kernel void @is_private_requires_queue_ptr(ptr %ptr) {
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %ptr)
   %zext = zext i1 %is.private to i32
-  store volatile i32 %zext, i32 addrspace(1)* undef
+  store volatile i32 %zext, ptr addrspace(1) undef
   ret void
 }
 
@@ -55,24 +55,24 @@ define amdgpu_kernel void @trap_requires_queue_ptr() {
 ; CHECK: - .args:
 ; CHECK:		hidden_queue_ptr
 ; CHECK-LABEL:		.name:           amdgcn_queue_ptr_requires_queue_ptr
-define amdgpu_kernel void @amdgcn_queue_ptr_requires_queue_ptr(i64 addrspace(1)* %ptr)  {
-  %queue.ptr = call i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-  %implicitarg.ptr = call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %dispatch.ptr = call i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+define amdgpu_kernel void @amdgcn_queue_ptr_requires_queue_ptr(ptr addrspace(1) %ptr)  {
+  %queue.ptr = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %dispatch.ptr = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
   %dispatch.id = call i64 @llvm.amdgcn.dispatch.id()
-  %queue.load = load volatile i8, i8 addrspace(4)* %queue.ptr
-  %implicitarg.load = load volatile i8, i8 addrspace(4)* %implicitarg.ptr
-  %dispatch.load = load volatile i8, i8 addrspace(4)* %dispatch.ptr
-  store volatile i64 %dispatch.id, i64 addrspace(1)* %ptr
+  %queue.load = load volatile i8, ptr addrspace(4) %queue.ptr
+  %implicitarg.load = load volatile i8, ptr addrspace(4) %implicitarg.ptr
+  %dispatch.load = load volatile i8, ptr addrspace(4) %dispatch.ptr
+  store volatile i64 %dispatch.id, ptr addrspace(1) %ptr
   ret void
 }
 
 
-declare noalias i8 addrspace(4)* @llvm.amdgcn.queue.ptr()
-declare noalias i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+declare noalias ptr addrspace(4) @llvm.amdgcn.queue.ptr()
+declare noalias ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 declare i64 @llvm.amdgcn.dispatch.id()
-declare noalias i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-declare i1 @llvm.amdgcn.is.shared(i8*)
-declare i1 @llvm.amdgcn.is.private(i8*)
+declare noalias ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
+declare i1 @llvm.amdgcn.is.shared(ptr)
+declare i1 @llvm.amdgcn.is.private(ptr)
 declare void @llvm.trap()
 declare void @llvm.debugtrap()

diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-queueptr-v5.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-queueptr-v5.ll
index b2230b4a8321..9671ffc153c4 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-queueptr-v5.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-queueptr-v5.ll
@@ -6,35 +6,34 @@ declare void @function1()
 declare void @function2() #0
 
 ; Function Attrs: noinline
-define void @function3(i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  store i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink, align 8
+define void @function3(ptr addrspace(4) %argptr, ptr addrspace(1) %sink) #2 {
+  store ptr addrspace(4) %argptr, ptr addrspace(1) %sink, align 8
   ret void
 }
 
 ; Function Attrs: noinline
-define void @function4(i64 %arg, i64* %a) #2 {
-  store i64 %arg, i64* %a
+define void @function4(i64 %arg, ptr %a) #2 {
+  store i64 %arg, ptr %a
   ret void
 }
 
 ; Function Attrs: noinline
-define void @function5(i8 addrspace(4)* %ptr, i64* %sink) #2 {
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 168
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %sink
+define void @function5(ptr addrspace(4) %ptr, ptr %sink) #2 {
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 168
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %sink
   ret void
 }
 
 ; Function Attrs: nounwind readnone speculatable willreturn
-declare align 4 i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #1
+declare align 4 ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #1
 
 ; CHECK: amdhsa.kernels:
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel10
-define amdgpu_kernel void @test_kernel10(i8* %a) {
-  store i8 3, i8* %a, align 1
+define amdgpu_kernel void @test_kernel10(ptr %a) {
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -43,9 +42,9 @@ define amdgpu_kernel void @test_kernel10(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel20
-define amdgpu_kernel void @test_kernel20(i8* %a) {
+define amdgpu_kernel void @test_kernel20(ptr %a) {
   call void @function1()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -54,9 +53,9 @@ define amdgpu_kernel void @test_kernel20(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel21
-define amdgpu_kernel void @test_kernel21(i8* %a) #0 {
+define amdgpu_kernel void @test_kernel21(ptr %a) #0 {
   call void @function1()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -65,9 +64,9 @@ define amdgpu_kernel void @test_kernel21(i8* %a) #0 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel22
-define amdgpu_kernel void @test_kernel22(i8* %a) {
+define amdgpu_kernel void @test_kernel22(ptr %a) {
   call void @function2()
-  store i8 3, i8* %a, align 1
+  store i8 3, ptr %a, align 1
   ret void
 }
 
@@ -76,12 +75,11 @@ define amdgpu_kernel void @test_kernel22(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel30
-define amdgpu_kernel void @test_kernel30(i128* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 192
-  %cast = bitcast i8 addrspace(4)* %gep to i128 addrspace(4)*
-  %x = load i128, i128 addrspace(4)* %cast
-  store i128 %x, i128* %a
+define amdgpu_kernel void @test_kernel30(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 192
+  %x = load i128, ptr addrspace(4) %gep
+  store i128 %x, ptr %a
   ret void
 }
 
@@ -90,12 +88,11 @@ define amdgpu_kernel void @test_kernel30(i128* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel40
-define amdgpu_kernel void @test_kernel40(i64* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 200
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel40(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 200
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -104,12 +101,11 @@ define amdgpu_kernel void @test_kernel40(i64* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel41
-define amdgpu_kernel void @test_kernel41(i64* %a) #0 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 200
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel41(ptr %a) #0 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 200
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -118,12 +114,11 @@ define amdgpu_kernel void @test_kernel41(i64* %a) #0 {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel42
-define amdgpu_kernel void @test_kernel42(i64* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 192
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel42(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 192
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -132,12 +127,11 @@ define amdgpu_kernel void @test_kernel42(i64* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel43
-define amdgpu_kernel void @test_kernel43(i64* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 208
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  store i64 %x, i64* %a
+define amdgpu_kernel void @test_kernel43(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 208
+  %x = load i64, ptr addrspace(4) %gep
+  store i64 %x, ptr %a
   ret void
 }
 
@@ -146,11 +140,11 @@ define amdgpu_kernel void @test_kernel43(i64* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel44
-define amdgpu_kernel void @test_kernel44(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 199
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel44(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 199
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -159,11 +153,11 @@ define amdgpu_kernel void @test_kernel44(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel45
-define amdgpu_kernel void @test_kernel45(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 200
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel45(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 200
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -172,11 +166,11 @@ define amdgpu_kernel void @test_kernel45(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel46
-define amdgpu_kernel void @test_kernel46(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 207
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel46(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 207
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -185,11 +179,11 @@ define amdgpu_kernel void @test_kernel46(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel47
-define amdgpu_kernel void @test_kernel47(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 208
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel47(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 208
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -198,11 +192,11 @@ define amdgpu_kernel void @test_kernel47(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel50
-define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 %b
-  %x = load i8, i8 addrspace(4)* %gep, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel50(ptr %a, i32 %b) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 %b
+  %x = load i8, ptr addrspace(4) %gep, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -211,12 +205,12 @@ define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel51
-define amdgpu_kernel void @test_kernel51(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 184
-  %x = load i8, i8 addrspace(4)* %gep2, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel51(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep1 = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %gep2 = getelementptr inbounds i8, ptr addrspace(4) %gep1, i64 184
+  %x = load i8, ptr addrspace(4) %gep2, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -225,12 +219,12 @@ define amdgpu_kernel void @test_kernel51(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel52
-define amdgpu_kernel void @test_kernel52(i8* %a) {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
-  %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 16
-  %x = load i8, i8 addrspace(4)* %gep2, align 1
-  store i8 %x, i8* %a, align 1
+define amdgpu_kernel void @test_kernel52(ptr %a) {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep1 = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 16
+  %gep2 = getelementptr inbounds i8, ptr addrspace(4) %gep1, i64 16
+  %x = load i8, ptr addrspace(4) %gep2, align 1
+  store i8 %x, ptr %a, align 1
   ret void
 }
 
@@ -239,12 +233,11 @@ define amdgpu_kernel void @test_kernel52(i8* %a) {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel60
-define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 200
-  %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
-  %x = load i64, i64 addrspace(4)* %cast
-  call void @function4(i64 %x, i64* %a)
+define amdgpu_kernel void @test_kernel60(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 200
+  %x = load i64, ptr addrspace(4) %gep
+  call void @function4(i64 %x, ptr %a)
   ret void
 }
 
@@ -253,10 +246,10 @@ define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel61
-define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 32
-  call void @function5(i8 addrspace(4)* %gep, i64* %a)
+define amdgpu_kernel void @test_kernel61(ptr %a) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i64 32
+  call void @function5(ptr addrspace(4) %gep, ptr %a)
   ret void
 }
 
@@ -265,10 +258,10 @@ define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel70
-define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink, align 8
+define amdgpu_kernel void @test_kernel70(ptr addrspace(1) %sink) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  store ptr addrspace(4) %gep, ptr addrspace(1) %sink, align 8
   ret void
 }
 
@@ -277,10 +270,10 @@ define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #
 ; CHECK:  - .args:
 ; CHECK: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel71
-define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  call void @function3(i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink)
+define amdgpu_kernel void @test_kernel71(ptr addrspace(1) %sink) #2 {
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  call void @function3(ptr addrspace(4) %gep, ptr addrspace(1) %sink)
   ret void
 }
 
@@ -290,9 +283,9 @@ define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #
 ; CHECK-NOT: hidden_queue_ptr
 ; CHECK-LABEL:    .name:           test_kernel72
 define amdgpu_kernel void @test_kernel72() #2 {
-  %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
-  %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
-  store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* undef, align 8
+  %ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr inbounds i8, ptr addrspace(4) %ptr, i32 42
+  store ptr addrspace(4) %gep, ptr addrspace(1) undef, align 8
   ret void
 }
 

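The same rewrite pattern repeats across the remaining files: the pointee type is dropped from the
pointer syntax, and pointer-to-pointer bitcasts become redundant, so loads and stores use the
getelementptr result directly. A minimal before/after sketch of that shape, using a hypothetical
@example_load_field function that is not part of this commit:

  ; with typed pointers
  define void @example_load_field(i8 addrspace(4)* %base, i64* %out) {
    %gep = getelementptr inbounds i8, i8 addrspace(4)* %base, i64 8
    %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
    %val = load i64, i64 addrspace(4)* %cast
    store i64 %val, i64* %out
    ret void
  }

  ; with opaque pointers
  define void @example_load_field(ptr addrspace(4) %base, ptr %out) {
    %gep = getelementptr inbounds i8, ptr addrspace(4) %base, i64 8
    %val = load i64, ptr addrspace(4) %gep
    store i64 %val, ptr %out
    ret void
  }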
diff --git a/llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt-opaque-ptr.ll b/llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt-opaque-ptr.ll
index e95a68443abf..1f3817b688ae 100644
--- a/llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt-opaque-ptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt-opaque-ptr.ll
@@ -2,7 +2,7 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa --amdhsa-code-object-version=5 -S -opaque-pointers -passes=amdgpu-lower-kernel-attributes,instcombine %s | FileCheck -enable-var-scope -check-prefix=GCN %s
 
 ; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
-define amdgpu_kernel void @get_local_size_x_opaque_pointer(i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @get_local_size_x_opaque_pointer(ptr addrspace(1) %out) #0 {
 ; GCN-LABEL: @get_local_size_x_opaque_pointer(
 ; GCN-NEXT:    [[IMPLICITARG_PTR:%.*]] = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; GCN-NEXT:    [[GEP_LOCAL_SIZE:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 12
@@ -17,12 +17,12 @@ define amdgpu_kernel void @get_local_size_x_opaque_pointer(i16 addrspace(1)* %ou
   %local.size.offset = select i1 %cmp.id.count, i64 12, i64 18
   %gep.local.size = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 %local.size.offset
   %local.size = load i16, ptr addrspace(4) %gep.local.size, align 2
-  store i16 %local.size, i16 addrspace(1)* %out
+  store i16 %local.size, ptr addrspace(1) %out
   ret void
 }
 
 ; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
-define amdgpu_kernel void @get_local_size_y_opaque_pointer(i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @get_local_size_y_opaque_pointer(ptr addrspace(1) %out) #0 {
 ; GCN-LABEL: @get_local_size_y_opaque_pointer(
 ; GCN-NEXT:    [[IMPLICITARG_PTR:%.*]] = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; GCN-NEXT:    [[GEP_LOCAL_SIZE:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 14
@@ -38,12 +38,12 @@ define amdgpu_kernel void @get_local_size_y_opaque_pointer(i16 addrspace(1)* %ou
   %local.size.offset = select i1 %cmp.id.count, i64 14, i64 20
   %gep.local.size = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 %local.size.offset
   %local.size = load i16, ptr addrspace(4) %gep.local.size, align 2
-  store i16 %local.size, i16 addrspace(1)* %out
+  store i16 %local.size, ptr addrspace(1) %out
   ret void
 }
 
 ; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
-define amdgpu_kernel void @get_local_size_z_opaque_pointer(i16 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @get_local_size_z_opaque_pointer(ptr addrspace(1) %out) #0 {
 ; GCN-LABEL: @get_local_size_z_opaque_pointer(
 ; GCN-NEXT:    [[IMPLICITARG_PTR:%.*]] = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
 ; GCN-NEXT:    [[GEP_LOCAL_SIZE:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 16
@@ -59,7 +59,7 @@ define amdgpu_kernel void @get_local_size_z_opaque_pointer(i16 addrspace(1)* %ou
   %local.size.offset = select i1 %cmp.id.count, i64 16, i64 22
   %gep.local.size = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 %local.size.offset
   %local.size = load i16, ptr addrspace(4) %gep.local.size, align 2
-  store i16 %local.size, i16 addrspace(1)* %out
+  store i16 %local.size, ptr addrspace(1) %out
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-term.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-term.ll
index 2430868b7986..35f07759d5bf 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-term.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-term.ll
@@ -7,14 +7,14 @@
 declare i32 @llvm.amdgcn.workitem.id.x() #1
 
 ; There should be no spill code inserted between the xor and the real terminator
-define amdgpu_kernel void @extract_w_offset_vgpr(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @extract_w_offset_vgpr(ptr addrspace(1) %out) {
   ; GCN-LABEL: name: extract_w_offset_vgpr
   ; GCN: bb.0.entry:
   ; GCN-NEXT:   successors: %bb.1(0x80000000)
   ; GCN-NEXT:   liveins: $vgpr0, $sgpr0_sgpr1
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY killed $vgpr0
-  ; GCN-NEXT:   renamable $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed renamable $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s64) from %ir.out.kernarg.offset.cast, align 4, addrspace 4)
+  ; GCN-NEXT:   renamable $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed renamable $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s64) from %ir.out.kernarg.offset, align 4, addrspace 4)
   ; GCN-NEXT:   renamable $sgpr6 = COPY renamable $sgpr1
   ; GCN-NEXT:   renamable $sgpr0 = COPY renamable $sgpr0, implicit killed $sgpr0_sgpr1
   ; GCN-NEXT:   renamable $sgpr4 = S_MOV_B32 61440
@@ -106,6 +106,6 @@ entry:
   %id = call i32 @llvm.amdgcn.workitem.id.x() #1
   %index = add i32 %id, 1
   %value = extractelement <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>, i32 %index
-  store i32 %value, i32 addrspace(1)* %out
+  store i32 %value, ptr addrspace(1) %out
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index aaa4833482e3..924593a1d099 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -33,7 +33,7 @@ define amdgpu_kernel void @insertelement_v2f32_0(ptr addrspace(1) %out, <2 x flo
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x float> %a, float 5.000000e+00, i32 0
-  store <2 x float> %vecins, <2 x float> addrspace(1)* %out, align 16
+  store <2 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -64,7 +64,7 @@ define amdgpu_kernel void @insertelement_v2f32_1(ptr addrspace(1) %out, <2 x flo
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x float> %a, float 5.000000e+00, i32 1
-  store <2 x float> %vecins, <2 x float> addrspace(1)* %out, align 16
+  store <2 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -95,7 +95,7 @@ define amdgpu_kernel void @insertelement_v2i32_0(ptr addrspace(1) %out, <2 x i32
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i32> %a, i32 999, i32 0
-  store <2 x i32> %vecins, <2 x i32> addrspace(1)* %out, align 16
+  store <2 x i32> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -126,7 +126,7 @@ define amdgpu_kernel void @insertelement_v2i32_1(ptr addrspace(1) %out, <2 x i32
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i32> %a, i32 999, i32 1
-  store <2 x i32> %vecins, <2 x i32> addrspace(1)* %out, align 16
+  store <2 x i32> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -163,7 +163,7 @@ define amdgpu_kernel void @insertelement_v4f32_0(ptr addrspace(1) %out, <4 x flo
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 0
-  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+  store <4 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -198,7 +198,7 @@ define amdgpu_kernel void @insertelement_v4f32_1(ptr addrspace(1) %out, <4 x flo
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 1
-  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+  store <4 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -233,7 +233,7 @@ define amdgpu_kernel void @insertelement_v4f32_2(ptr addrspace(1) %out, <4 x flo
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 2
-  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+  store <4 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -268,7 +268,7 @@ define amdgpu_kernel void @insertelement_v4f32_3(ptr addrspace(1) %out, <4 x flo
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 3
-  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+  store <4 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -303,7 +303,7 @@ define amdgpu_kernel void @insertelement_v4i32_0(ptr addrspace(1) %out, <4 x i32
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x i32> %a, i32 999, i32 0
-  store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
+  store <4 x i32> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -334,7 +334,7 @@ define amdgpu_kernel void @insertelement_v3f32_1(ptr addrspace(1) %out, <3 x flo
 ; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 1
-  store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
+  store <3 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -365,7 +365,7 @@ define amdgpu_kernel void @insertelement_v3f32_2(ptr addrspace(1) %out, <3 x flo
 ; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 2
-  store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
+  store <3 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -374,7 +374,7 @@ define amdgpu_kernel void @insertelement_v3f32_3(ptr addrspace(1) %out, <3 x flo
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_endpgm
   %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 3
-  store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
+  store <3 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -388,7 +388,7 @@ define <4 x float> @insertelement_to_sgpr() nounwind {
 ; GCN-NEXT:    image_gather4_lz v[0:3], v[0:1], s[4:11], s[4:7] dmask:0x1
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %tmp = load <4 x i32>, <4 x i32> addrspace(4)* undef
+  %tmp = load <4 x i32>, ptr addrspace(4) undef
   %tmp1 = insertelement <4 x i32> %tmp, i32 0, i32 0
   %tmp2 = call <4 x float> @llvm.amdgcn.image.gather4.lz.2d.v4f32.f32(i32 1, float undef, float undef, <8 x i32> undef, <4 x i32> %tmp1, i1 0, i32 0, i32 0)
   ret <4 x float> %tmp2
@@ -412,7 +412,7 @@ define <9 x float> @insertelement_to_v9f32_undef() nounwind {
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_mov_b32_e32 v8, s4
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %tmp = load <9 x float>, <9 x float> addrspace(4)* undef
+  %tmp = load <9 x float>, ptr addrspace(4) undef
   %tmp1 = insertelement <9 x float> %tmp, float 5.000, i32 0
   %tmp2 = insertelement <9 x float> %tmp1, float -5.000, i32 2
   %tmp3 = insertelement <9 x float> %tmp2, float 17.000, i32 7
@@ -438,7 +438,7 @@ define <10 x float> @insertelement_to_v10f32_undef() nounwind {
 ; GCN-NEXT:    v_mov_b32_e32 v8, s12
 ; GCN-NEXT:    v_mov_b32_e32 v9, s13
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %tmp = load <10 x float>, <10 x float> addrspace(4)* undef
+  %tmp = load <10 x float>, ptr addrspace(4) undef
   %tmp1 = insertelement <10 x float> %tmp, float 2.0, i32 0
   ret <10 x float> %tmp1
 }
@@ -463,7 +463,7 @@ define <11 x float> @insertelement_to_v11f32_undef() nounwind {
 ; GCN-NEXT:    v_mov_b32_e32 v9, s13
 ; GCN-NEXT:    v_mov_b32_e32 v10, s14
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %tmp = load <11 x float>, <11 x float> addrspace(4)* undef
+  %tmp = load <11 x float>, ptr addrspace(4) undef
   %tmp1 = insertelement <11 x float> %tmp, float 1.000, i32 0
   ret <11 x float> %tmp1
 }
@@ -489,7 +489,7 @@ define <12 x float> @insertelement_to_v12f32_undef() nounwind {
 ; GCN-NEXT:    v_mov_b32_e32 v10, s14
 ; GCN-NEXT:    v_mov_b32_e32 v11, s15
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
-  %tmp = load <12 x float>, <12 x float> addrspace(4)* undef
+  %tmp = load <12 x float>, ptr addrspace(4) undef
   %tmp1 = insertelement <12 x float> %tmp, float 4.0, i32 0
   ret <12 x float> %tmp1
 }
@@ -537,7 +537,7 @@ define amdgpu_kernel void @dynamic_insertelement_v2f32(ptr addrspace(1) %out, <2
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x float> %a, float 5.000000e+00, i32 %b
-  store <2 x float> %vecins, <2 x float> addrspace(1)* %out, align 8
+  store <2 x float> %vecins, ptr addrspace(1) %out, align 8
   ret void
 }
 
@@ -590,7 +590,7 @@ define amdgpu_kernel void @dynamic_insertelement_v3f32(ptr addrspace(1) %out, <3
 ; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 %b
-  store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
+  store <3 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -651,7 +651,7 @@ define amdgpu_kernel void @dynamic_insertelement_v4f32(ptr addrspace(1) %out, <4
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %b
-  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
+  store <4 x float> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -702,7 +702,7 @@ define amdgpu_kernel void @dynamic_insertelement_v8f32(ptr addrspace(1) %out, <8
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <8 x float> %a, float 5.000000e+00, i32 %b
-  store <8 x float> %vecins, <8 x float> addrspace(1)* %out, align 32
+  store <8 x float> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -759,7 +759,7 @@ define amdgpu_kernel void @dynamic_insertelement_v9f32(ptr addrspace(1) %out, <9
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <9 x float> %a, float 5.000000e+00, i32 %b
-  store <9 x float> %vecins, <9 x float> addrspace(1)* %out, align 32
+  store <9 x float> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -818,7 +818,7 @@ define amdgpu_kernel void @dynamic_insertelement_v10f32(ptr addrspace(1) %out, <
 ; VI-NEXT:    buffer_store_dwordx2 v[8:9], off, s[0:3], 0 offset:32
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <10 x float> %a, float 5.000000e+00, i32 %b
-  store <10 x float> %vecins, <10 x float> addrspace(1)* %out, align 32
+  store <10 x float> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -880,7 +880,7 @@ define amdgpu_kernel void @dynamic_insertelement_v11f32(ptr addrspace(1) %out, <
 ; VI-NEXT:    buffer_store_dwordx3 v[8:10], off, s[0:3], 0 offset:32
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <11 x float> %a, float 5.000000e+00, i32 %b
-  store <11 x float> %vecins, <11 x float> addrspace(1)* %out, align 32
+  store <11 x float> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -944,7 +944,7 @@ define amdgpu_kernel void @dynamic_insertelement_v12f32(ptr addrspace(1) %out, <
 ; VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <12 x float> %a, float 5.000000e+00, i32 %b
-  store <12 x float> %vecins, <12 x float> addrspace(1)* %out, align 32
+  store <12 x float> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -1015,7 +1015,7 @@ define amdgpu_kernel void @dynamic_insertelement_v16f32(ptr addrspace(1) %out, <
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <16 x float> %a, float 5.000000e+00, i32 %b
-  store <16 x float> %vecins, <16 x float> addrspace(1)* %out, align 64
+  store <16 x float> %vecins, ptr addrspace(1) %out, align 64
   ret void
 }
 
@@ -1056,7 +1056,7 @@ define amdgpu_kernel void @dynamic_insertelement_v2i32(ptr addrspace(1) %out, <2
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i32> %a, i32 5, i32 %b
-  store <2 x i32> %vecins, <2 x i32> addrspace(1)* %out, align 8
+  store <2 x i32> %vecins, ptr addrspace(1) %out, align 8
   ret void
 }
 
@@ -1101,7 +1101,7 @@ define amdgpu_kernel void @dynamic_insertelement_v3i32(ptr addrspace(1) %out, <3
 ; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x i32> %a, i32 5, i32 %b
-  store <3 x i32> %vecins, <3 x i32> addrspace(1)* %out, align 16
+  store <3 x i32> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -1154,7 +1154,7 @@ define amdgpu_kernel void @dynamic_insertelement_v4i32(ptr addrspace(1) %out, <4
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x i32> %a, i32 %val, i32 %b
-  store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
+  store <4 x i32> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -1203,7 +1203,7 @@ define amdgpu_kernel void @dynamic_insertelement_v8i32(ptr addrspace(1) %out, <8
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <8 x i32> %a, i32 5, i32 %b
-  store <8 x i32> %vecins, <8 x i32> addrspace(1)* %out, align 32
+  store <8 x i32> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -1258,7 +1258,7 @@ define amdgpu_kernel void @dynamic_insertelement_v9i32(ptr addrspace(1) %out, <9
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <9 x i32> %a, i32 5, i32 %b
-  store <9 x i32> %vecins, <9 x i32> addrspace(1)* %out, align 32
+  store <9 x i32> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -1315,7 +1315,7 @@ define amdgpu_kernel void @dynamic_insertelement_v10i32(ptr addrspace(1) %out, <
 ; VI-NEXT:    buffer_store_dwordx2 v[8:9], off, s[0:3], 0 offset:32
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <10 x i32> %a, i32 5, i32 %b
-  store <10 x i32> %vecins, <10 x i32> addrspace(1)* %out, align 32
+  store <10 x i32> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -1375,7 +1375,7 @@ define amdgpu_kernel void @dynamic_insertelement_v11i32(ptr addrspace(1) %out, <
 ; VI-NEXT:    buffer_store_dwordx3 v[8:10], off, s[0:3], 0 offset:32
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <11 x i32> %a, i32 5, i32 %b
-  store <11 x i32> %vecins, <11 x i32> addrspace(1)* %out, align 32
+  store <11 x i32> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -1437,7 +1437,7 @@ define amdgpu_kernel void @dynamic_insertelement_v12i32(ptr addrspace(1) %out, <
 ; VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <12 x i32> %a, i32 5, i32 %b
-  store <12 x i32> %vecins, <12 x i32> addrspace(1)* %out, align 32
+  store <12 x i32> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -1506,7 +1506,7 @@ define amdgpu_kernel void @dynamic_insertelement_v16i32(ptr addrspace(1) %out, <
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <16 x i32> %a, i32 5, i32 %b
-  store <16 x i32> %vecins, <16 x i32> addrspace(1)* %out, align 64
+  store <16 x i32> %vecins, ptr addrspace(1) %out, align 64
   ret void
 }
 
@@ -1545,7 +1545,7 @@ define amdgpu_kernel void @dynamic_insertelement_v2i16(ptr addrspace(1) %out, <2
 ; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i16> %a, i16 5, i32 %b
-  store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out, align 8
+  store <2 x i16> %vecins, ptr addrspace(1) %out, align 8
   ret void
 }
 
@@ -1595,7 +1595,7 @@ define amdgpu_kernel void @dynamic_insertelement_v3i16(ptr addrspace(1) %out, <3
 ; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x i16> %a, i16 5, i32 %b
-  store <3 x i16> %vecins, <3 x i16> addrspace(1)* %out, align 8
+  store <3 x i16> %vecins, ptr addrspace(1) %out, align 8
   ret void
 }
 
@@ -1635,7 +1635,7 @@ define amdgpu_kernel void @dynamic_insertelement_v2i8(ptr addrspace(1) %out, [8
 ; VI-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i8> %a, i8 5, i32 %b
-  store <2 x i8> %vecins, <2 x i8> addrspace(1)* %out, align 8
+  store <2 x i8> %vecins, ptr addrspace(1) %out, align 8
   ret void
 }
 
@@ -1682,7 +1682,7 @@ define amdgpu_kernel void @dynamic_insertelement_v3i8(ptr addrspace(1) %out, [8
 ; VI-NEXT:    buffer_store_byte v0, off, s[0:3], 0 offset:2
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x i8> %a, i8 5, i32 %b
-  store <3 x i8> %vecins, <3 x i8> addrspace(1)* %out, align 4
+  store <3 x i8> %vecins, ptr addrspace(1) %out, align 4
   ret void
 }
 
@@ -1721,11 +1721,11 @@ define amdgpu_kernel void @dynamic_insertelement_v4i8(ptr addrspace(1) %out, [8
 ; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x i8> %a, i8 5, i32 %b
-  store <4 x i8> %vecins, <4 x i8> addrspace(1)* %out, align 4
+  store <4 x i8> %vecins, ptr addrspace(1) %out, align 4
   ret void
 }
 
-define amdgpu_kernel void @s_dynamic_insertelement_v8i8(ptr addrspace(1) %out, <8 x i8> addrspace(4)* %a.ptr, i32 %b) nounwind {
+define amdgpu_kernel void @s_dynamic_insertelement_v8i8(ptr addrspace(1) %out, ptr addrspace(4) %a.ptr, i32 %b) nounwind {
 ; SI-LABEL: s_dynamic_insertelement_v8i8:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
@@ -1771,9 +1771,9 @@ define amdgpu_kernel void @s_dynamic_insertelement_v8i8(ptr addrspace(1) %out, <
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
-  %a = load <8 x i8>, <8 x i8> addrspace(4)* %a.ptr, align 4
+  %a = load <8 x i8>, ptr addrspace(4) %a.ptr, align 4
   %vecins = insertelement <8 x i8> %a, i8 5, i32 %b
-  store <8 x i8> %vecins, <8 x i8> addrspace(1)* %out, align 8
+  store <8 x i8> %vecins, ptr addrspace(1) %out, align 8
   ret void
 }
 
@@ -1980,13 +1980,13 @@ define amdgpu_kernel void @dynamic_insertelement_v16i8(ptr addrspace(1) %out, <1
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <16 x i8> %a, i8 5, i32 %b
-  store <16 x i8> %vecins, <16 x i8> addrspace(1)* %out, align 16
+  store <16 x i8> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
 ; This test requires handling INSERT_SUBREG in SIFixSGPRCopies.  Check that
 ; the compiler doesn't crash.
-define amdgpu_kernel void @insert_split_bb(ptr addrspace(1) %out, i32 addrspace(1)* %in, i32 %a, i32 %b) {
+define amdgpu_kernel void @insert_split_bb(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %a, i32 %b) {
 ; SI-LABEL: insert_split_bb:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dword s6, s[4:5], 0x4
@@ -2043,19 +2043,19 @@ entry:
   br i1 %1, label %if, label %else
 
 if:
-  %2 = load i32, i32 addrspace(1)* %in
+  %2 = load i32, ptr addrspace(1) %in
   %3 = insertelement <2 x i32> %0, i32 %2, i32 1
   br label %endif
 
 else:
-  %4 = getelementptr i32, i32 addrspace(1)* %in, i32 1
-  %5 = load i32, i32 addrspace(1)* %4
+  %4 = getelementptr i32, ptr addrspace(1) %in, i32 1
+  %5 = load i32, ptr addrspace(1) %4
   %6 = insertelement <2 x i32> %0, i32 %5, i32 1
   br label %endif
 
 endif:
   %7 = phi <2 x i32> [%3, %if], [%6, %else]
-  store <2 x i32> %7, <2 x i32> addrspace(1)* %out
+  store <2 x i32> %7, ptr addrspace(1) %out
   ret void
 }
 
@@ -2102,7 +2102,7 @@ define amdgpu_kernel void @dynamic_insertelement_v2f64(ptr addrspace(1) %out, [8
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x double> %a, double 8.0, i32 %b
-  store <2 x double> %vecins, <2 x double> addrspace(1)* %out, align 16
+  store <2 x double> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -2149,7 +2149,7 @@ define amdgpu_kernel void @dynamic_insertelement_v2i64(ptr addrspace(1) %out, <2
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i64> %a, i64 5, i32 %b
-  store <2 x i64> %vecins, <2 x i64> addrspace(1)* %out, align 8
+  store <2 x i64> %vecins, ptr addrspace(1) %out, align 8
   ret void
 }
 
@@ -2210,7 +2210,7 @@ define amdgpu_kernel void @dynamic_insertelement_v3i64(ptr addrspace(1) %out, <3
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x i64> %a, i64 5, i32 %b
-  store <3 x i64> %vecins, <3 x i64> addrspace(1)* %out, align 32
+  store <3 x i64> %vecins, ptr addrspace(1) %out, align 32
   ret void
 }
 
@@ -2281,7 +2281,7 @@ define amdgpu_kernel void @dynamic_insertelement_v4f64(ptr addrspace(1) %out, <4
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x double> %a, double 8.0, i32 %b
-  store <4 x double> %vecins, <4 x double> addrspace(1)* %out, align 16
+  store <4 x double> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 
@@ -2356,7 +2356,7 @@ define amdgpu_kernel void @dynamic_insertelement_v8f64(ptr addrspace(1) %out, <8
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <8 x double> %a, double 8.0, i32 %b
-  store <8 x double> %vecins, <8 x double> addrspace(1)* %out, align 16
+  store <8 x double> %vecins, ptr addrspace(1) %out, align 16
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
index dc246b42376a..9cea104c4629 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
@@ -11,8 +11,8 @@
 ; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
 define amdgpu_kernel void @constant_load_i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load i32, i32 addrspace(4)* %in
-  store i32 %ld, i32 addrspace(1)* %out
+  %ld = load i32, ptr addrspace(4) %in
+  store i32 %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -22,8 +22,8 @@ entry:
 ; EG: VTX_READ_64
 define amdgpu_kernel void @constant_load_v2i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load <2 x i32>, <2 x i32> addrspace(4)* %in
-  store <2 x i32> %ld, <2 x i32> addrspace(1)* %out
+  %ld = load <2 x i32>, ptr addrspace(4) %in
+  store <2 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -33,8 +33,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @constant_load_v3i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load <3 x i32>, <3 x i32> addrspace(4)* %in
-  store <3 x i32> %ld, <3 x i32> addrspace(1)* %out
+  %ld = load <3 x i32>, ptr addrspace(4) %in
+  store <3 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -44,8 +44,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @constant_load_v4i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load <4 x i32>, <4 x i32> addrspace(4)* %in
-  store <4 x i32> %ld, <4 x i32> addrspace(1)* %out
+  %ld = load <4 x i32>, ptr addrspace(4) %in
+  store <4 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -56,8 +56,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @constant_load_v8i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load <8 x i32>, <8 x i32> addrspace(4)* %in
-  store <8 x i32> %ld, <8 x i32> addrspace(1)* %out
+  %ld = load <8 x i32>, ptr addrspace(4) %in
+  store <8 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -70,8 +70,8 @@ entry:
 ; EG: VTX_READ_32
 define amdgpu_kernel void @constant_load_v9i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load <9 x i32>, <9 x i32> addrspace(4)* %in
-  store <9 x i32> %ld, <9 x i32> addrspace(1)* %out
+  %ld = load <9 x i32>, ptr addrspace(4) %in
+  store <9 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -84,8 +84,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @constant_load_v10i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load <10 x i32>, <10 x i32> addrspace(4)* %in
-  store <10 x i32> %ld, <10 x i32> addrspace(1)* %out
+  %ld = load <10 x i32>, ptr addrspace(4) %in
+  store <10 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -98,8 +98,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @constant_load_v11i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load <11 x i32>, <11 x i32> addrspace(4)* %in
-  store <11 x i32> %ld, <11 x i32> addrspace(1)* %out
+  %ld = load <11 x i32>, ptr addrspace(4) %in
+  store <11 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -112,8 +112,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @constant_load_v12i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load <12 x i32>, <12 x i32> addrspace(4)* %in
-  store <12 x i32> %ld, <12 x i32> addrspace(1)* %out
+  %ld = load <12 x i32>, ptr addrspace(4) %in
+  store <12 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -126,8 +126,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @constant_load_v16i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
 entry:
-  %ld = load <16 x i32>, <16 x i32> addrspace(4)* %in
-  store <16 x i32> %ld, <16 x i32> addrspace(1)* %out
+  %ld = load <16 x i32>, ptr addrspace(4) %in
+  store <16 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -140,9 +140,9 @@ entry:
 ; EG: CF_END
 ; EG: VTX_READ_32
 define amdgpu_kernel void @constant_zextload_i32_to_i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load i32, i32 addrspace(4)* %in
+  %ld = load i32, ptr addrspace(4) %in
   %ext = zext i32 %ld to i64
-  store i64 %ext, i64 addrspace(1)* %out
+  store i64 %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -157,9 +157,9 @@ define amdgpu_kernel void @constant_zextload_i32_to_i64(ptr addrspace(1) %out, p
 ; EG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}},  literal.
 ; EG: 31
 define amdgpu_kernel void @constant_sextload_i32_to_i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load i32, i32 addrspace(4)* %in
+  %ld = load i32, ptr addrspace(4) %in
   %ext = sext i32 %ld to i64
-  store i64 %ext, i64 addrspace(1)* %out
+  store i64 %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -167,9 +167,9 @@ define amdgpu_kernel void @constant_sextload_i32_to_i64(ptr addrspace(1) %out, p
 ; GCN: s_load_dword
 ; GCN: store_dwordx2
 define amdgpu_kernel void @constant_zextload_v1i32_to_v1i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <1 x i32>, <1 x i32> addrspace(4)* %in
+  %ld = load <1 x i32>, ptr addrspace(4) %in
   %ext = zext <1 x i32> %ld to <1 x i64>
-  store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+  store <1 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -178,9 +178,9 @@ define amdgpu_kernel void @constant_zextload_v1i32_to_v1i64(ptr addrspace(1) %ou
 ; GCN: s_ashr_i32 s[[HI:[0-9]+]], s[[LO]], 31
 ; GCN: store_dwordx2
 define amdgpu_kernel void @constant_sextload_v1i32_to_v1i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <1 x i32>, <1 x i32> addrspace(4)* %in
+  %ld = load <1 x i32>, ptr addrspace(4) %in
   %ext = sext <1 x i32> %ld to <1 x i64>
-  store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+  store <1 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -188,9 +188,9 @@ define amdgpu_kernel void @constant_sextload_v1i32_to_v1i64(ptr addrspace(1) %ou
 ; GCN: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
 ; GCN: store_dwordx4
 define amdgpu_kernel void @constant_zextload_v2i32_to_v2i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <2 x i32>, <2 x i32> addrspace(4)* %in
+  %ld = load <2 x i32>, ptr addrspace(4) %in
   %ext = zext <2 x i32> %ld to <2 x i64>
-  store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+  store <2 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -202,9 +202,9 @@ define amdgpu_kernel void @constant_zextload_v2i32_to_v2i64(ptr addrspace(1) %ou
 
 ; GCN: store_dwordx4
 define amdgpu_kernel void @constant_sextload_v2i32_to_v2i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <2 x i32>, <2 x i32> addrspace(4)* %in
+  %ld = load <2 x i32>, ptr addrspace(4) %in
   %ext = sext <2 x i32> %ld to <2 x i64>
-  store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+  store <2 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -214,9 +214,9 @@ define amdgpu_kernel void @constant_sextload_v2i32_to_v2i64(ptr addrspace(1) %ou
 ; GCN: store_dwordx4
 ; GCN: store_dwordx4
 define amdgpu_kernel void @constant_zextload_v4i32_to_v4i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <4 x i32>, <4 x i32> addrspace(4)* %in
+  %ld = load <4 x i32>, ptr addrspace(4) %in
   %ext = zext <4 x i32> %ld to <4 x i64>
-  store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+  store <4 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -231,9 +231,9 @@ define amdgpu_kernel void @constant_zextload_v4i32_to_v4i64(ptr addrspace(1) %ou
 ; GCN: store_dwordx4
 ; GCN: store_dwordx4
 define amdgpu_kernel void @constant_sextload_v4i32_to_v4i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <4 x i32>, <4 x i32> addrspace(4)* %in
+  %ld = load <4 x i32>, ptr addrspace(4) %in
   %ext = sext <4 x i32> %ld to <4 x i64>
-  store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+  store <4 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -250,9 +250,9 @@ define amdgpu_kernel void @constant_sextload_v4i32_to_v4i64(ptr addrspace(1) %ou
 ; GCN-SA-DAG: {{flat|global}}_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @constant_zextload_v8i32_to_v8i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <8 x i32>, <8 x i32> addrspace(4)* %in
+  %ld = load <8 x i32>, ptr addrspace(4) %in
   %ext = zext <8 x i32> %ld to <8 x i64>
-  store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+  store <8 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -278,9 +278,9 @@ define amdgpu_kernel void @constant_zextload_v8i32_to_v8i64(ptr addrspace(1) %ou
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @constant_sextload_v8i32_to_v8i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <8 x i32>, <8 x i32> addrspace(4)* %in
+  %ld = load <8 x i32>, ptr addrspace(4) %in
   %ext = sext <8 x i32> %ld to <8 x i64>
-  store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+  store <8 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -299,9 +299,9 @@ define amdgpu_kernel void @constant_sextload_v8i32_to_v8i64(ptr addrspace(1) %ou
 ; GCN: store_dwordx4
 ; GCN: store_dwordx4
 define amdgpu_kernel void @constant_sextload_v16i32_to_v16i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <16 x i32>, <16 x i32> addrspace(4)* %in
+  %ld = load <16 x i32>, ptr addrspace(4) %in
   %ext = sext <16 x i32> %ld to <16 x i64>
-  store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+  store <16 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -326,9 +326,9 @@ define amdgpu_kernel void @constant_sextload_v16i32_to_v16i64(ptr addrspace(1) %
 ; GCN-HSA: {{flat|global}}_store_dwordx4
 ; GCN-HSA: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @constant_zextload_v16i32_to_v16i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <16 x i32>, <16 x i32> addrspace(4)* %in
+  %ld = load <16 x i32>, ptr addrspace(4) %in
   %ext = zext <16 x i32> %ld to <16 x i64>
-  store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+  store <16 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -378,9 +378,9 @@ define amdgpu_kernel void @constant_zextload_v16i32_to_v16i64(ptr addrspace(1) %
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 
 define amdgpu_kernel void @constant_sextload_v32i32_to_v32i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <32 x i32>, <32 x i32> addrspace(4)* %in
+  %ld = load <32 x i32>, ptr addrspace(4) %in
   %ext = sext <32 x i32> %ld to <32 x i64>
-  store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+  store <32 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -429,9 +429,9 @@ define amdgpu_kernel void @constant_sextload_v32i32_to_v32i64(ptr addrspace(1) %
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @constant_zextload_v32i32_to_v32i64(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <32 x i32>, <32 x i32> addrspace(4)* %in
+  %ld = load <32 x i32>, ptr addrspace(4) %in
   %ext = zext <32 x i32> %ld to <32 x i64>
-  store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+  store <32 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -481,8 +481,8 @@ define amdgpu_kernel void @constant_zextload_v32i32_to_v32i64(ptr addrspace(1) %
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @constant_load_v32i32(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
-  %ld = load <32 x i32>, <32 x i32> addrspace(4)* %in
-  store <32 x i32> %ld, <32 x i32> addrspace(1)* %out
+  %ld = load <32 x i32>, ptr addrspace(4) %in
+  store <32 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/load-global-f32.ll b/llvm/test/CodeGen/AMDGPU/load-global-f32.ll
index dd516dcdd8a6..35b405995252 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-f32.ll
@@ -12,8 +12,8 @@
 ; R600: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
 define amdgpu_kernel void @global_load_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %tmp0 = load float, float addrspace(1)* %in
-  store float %tmp0, float addrspace(1)* %out
+  %tmp0 = load float, ptr addrspace(1) %in
+  store float %tmp0, ptr addrspace(1) %out
   ret void
 }
 
@@ -24,8 +24,8 @@ entry:
 ; R600: VTX_READ_64
 define amdgpu_kernel void @global_load_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %tmp0 = load <2 x float>, <2 x float> addrspace(1)* %in
-  store <2 x float> %tmp0, <2 x float> addrspace(1)* %out
+  %tmp0 = load <2 x float>, ptr addrspace(1) %in
+  store <2 x float> %tmp0, ptr addrspace(1) %out
   ret void
 }
 
@@ -37,8 +37,8 @@ entry:
 ; R600: VTX_READ_128
 define amdgpu_kernel void @global_load_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %tmp0 = load <3 x float>, <3 x float> addrspace(1)* %in
-  store <3 x float> %tmp0, <3 x float> addrspace(1)* %out
+  %tmp0 = load <3 x float>, ptr addrspace(1) %in
+  store <3 x float> %tmp0, ptr addrspace(1) %out
   ret void
 }
 
@@ -49,8 +49,8 @@ entry:
 ; R600: VTX_READ_128
 define amdgpu_kernel void @global_load_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %tmp0 = load <4 x float>, <4 x float> addrspace(1)* %in
-  store <4 x float> %tmp0, <4 x float> addrspace(1)* %out
+  %tmp0 = load <4 x float>, ptr addrspace(1) %in
+  store <4 x float> %tmp0, ptr addrspace(1) %out
   ret void
 }
 
@@ -64,8 +64,8 @@ entry:
 ; R600: VTX_READ_128
 define amdgpu_kernel void @global_load_v8f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %tmp0 = load <8 x float>, <8 x float> addrspace(1)* %in
-  store <8 x float> %tmp0, <8 x float> addrspace(1)* %out
+  %tmp0 = load <8 x float>, ptr addrspace(1) %in
+  store <8 x float> %tmp0, ptr addrspace(1) %out
   ret void
 }
 
@@ -162,8 +162,8 @@ entry:
 ; R600: VTX_READ_128
 define amdgpu_kernel void @global_load_v16f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %tmp0 = load <16 x float>, <16 x float> addrspace(1)* %in
-  store <16 x float> %tmp0, <16 x float> addrspace(1)* %out
+  %tmp0 = load <16 x float>, ptr addrspace(1) %in
+  store <16 x float> %tmp0, ptr addrspace(1) %out
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll
index e5f25723cb62..f99bd4cb15c6 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i32.ll
@@ -12,8 +12,8 @@
 ; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
 define amdgpu_kernel void @global_load_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load i32, i32 addrspace(1)* %in
-  store i32 %ld, i32 addrspace(1)* %out
+  %ld = load i32, ptr addrspace(1) %in
+  store i32 %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -24,8 +24,8 @@ entry:
 ; EG: VTX_READ_64
 define amdgpu_kernel void @global_load_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
-  store <2 x i32> %ld, <2 x i32> addrspace(1)* %out
+  %ld = load <2 x i32>, ptr addrspace(1) %in
+  store <2 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -37,8 +37,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @global_load_v3i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load <3 x i32>, <3 x i32> addrspace(1)* %in
-  store <3 x i32> %ld, <3 x i32> addrspace(1)* %out
+  %ld = load <3 x i32>, ptr addrspace(1) %in
+  store <3 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -49,8 +49,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @global_load_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
-  store <4 x i32> %ld, <4 x i32> addrspace(1)* %out
+  %ld = load <4 x i32>, ptr addrspace(1) %in
+  store <4 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -64,8 +64,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @global_load_v8i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load <8 x i32>, <8 x i32> addrspace(1)* %in
-  store <8 x i32> %ld, <8 x i32> addrspace(1)* %out
+  %ld = load <8 x i32>, ptr addrspace(1) %in
+  store <8 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -78,8 +78,8 @@ entry:
 ; GCN-HSA: {{flat|global}}_load_dword
 define amdgpu_kernel void @global_load_v9i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load <9 x i32>, <9 x i32> addrspace(1)* %in
-  store <9 x i32> %ld, <9 x i32> addrspace(1)* %out
+  %ld = load <9 x i32>, ptr addrspace(1) %in
+  store <9 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -92,8 +92,8 @@ entry:
 ; GCN-HSA: {{flat|global}}_load_dwordx2
 define amdgpu_kernel void @global_load_v10i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load <10 x i32>, <10 x i32> addrspace(1)* %in
-  store <10 x i32> %ld, <10 x i32> addrspace(1)* %out
+  %ld = load <10 x i32>, ptr addrspace(1) %in
+  store <10 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -109,8 +109,8 @@ entry:
 ; GCN-HSA: {{flat|global}}_load_dwordx3
 define amdgpu_kernel void @global_load_v11i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load <11 x i32>, <11 x i32> addrspace(1)* %in
-  store <11 x i32> %ld, <11 x i32> addrspace(1)* %out
+  %ld = load <11 x i32>, ptr addrspace(1) %in
+  store <11 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -124,8 +124,8 @@ entry:
 ; GCN-HSA: {{flat|global}}_load_dwordx4
 define amdgpu_kernel void @global_load_v12i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load <12 x i32>, <12 x i32> addrspace(1)* %in
-  store <12 x i32> %ld, <12 x i32> addrspace(1)* %out
+  %ld = load <12 x i32>, ptr addrspace(1) %in
+  store <12 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -146,8 +146,8 @@ entry:
 ; EG: VTX_READ_128
 define amdgpu_kernel void @global_load_v16i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
 entry:
-  %ld = load <16 x i32>, <16 x i32> addrspace(1)* %in
-  store <16 x i32> %ld, <16 x i32> addrspace(1)* %out
+  %ld = load <16 x i32>, ptr addrspace(1) %in
+  store <16 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 
@@ -161,9 +161,9 @@ entry:
 
 ; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
 define amdgpu_kernel void @global_zextload_i32_to_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load i32, i32 addrspace(1)* %in
+  %ld = load i32, ptr addrspace(1) %in
   %ext = zext i32 %ld to i64
-  store i64 %ext, i64 addrspace(1)* %out
+  store i64 %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -180,9 +180,9 @@ define amdgpu_kernel void @global_zextload_i32_to_i64(ptr addrspace(1) %out, ptr
 ; EG: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, T{{[0-9]\.[XYZW]}},  literal.
 ; EG: 31
 define amdgpu_kernel void @global_sextload_i32_to_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load i32, i32 addrspace(1)* %in
+  %ld = load i32, ptr addrspace(1) %in
   %ext = sext i32 %ld to i64
-  store i64 %ext, i64 addrspace(1)* %out
+  store i64 %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -193,9 +193,9 @@ define amdgpu_kernel void @global_sextload_i32_to_i64(ptr addrspace(1) %out, ptr
 ; GCN-HSA: {{flat|global}}_load_dword
 ; GCN-HSA: {{flat|global}}_store_dwordx2
 define amdgpu_kernel void @global_zextload_v1i32_to_v1i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <1 x i32>, <1 x i32> addrspace(1)* %in
+  %ld = load <1 x i32>, ptr addrspace(1) %in
   %ext = zext <1 x i32> %ld to <1 x i64>
-  store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+  store <1 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -206,9 +206,9 @@ define amdgpu_kernel void @global_zextload_v1i32_to_v1i64(ptr addrspace(1) %out,
 ; GCN-NOHSA: buffer_store_dwordx2 v[[[LO]]:[[HI]]]
 ; GCN-HSA: {{flat|global}}_store_dwordx2 v{{.+}}, v[[[LO]]:[[HI]]]
 define amdgpu_kernel void @global_sextload_v1i32_to_v1i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <1 x i32>, <1 x i32> addrspace(1)* %in
+  %ld = load <1 x i32>, ptr addrspace(1) %in
   %ext = sext <1 x i32> %ld to <1 x i64>
-  store <1 x i64> %ext, <1 x i64> addrspace(1)* %out
+  store <1 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -219,9 +219,9 @@ define amdgpu_kernel void @global_sextload_v1i32_to_v1i64(ptr addrspace(1) %out,
 ; GCN-HSA: {{flat|global}}_load_dwordx2
 ; GCN-HSA: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_zextload_v2i32_to_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
+  %ld = load <2 x i32>, ptr addrspace(1) %in
   %ext = zext <2 x i32> %ld to <2 x i64>
-  store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+  store <2 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -235,9 +235,9 @@ define amdgpu_kernel void @global_zextload_v2i32_to_v2i64(ptr addrspace(1) %out,
 ; GCN-NOHSA-DAG: buffer_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_sextload_v2i32_to_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <2 x i32>, <2 x i32> addrspace(1)* %in
+  %ld = load <2 x i32>, ptr addrspace(1) %in
   %ext = sext <2 x i32> %ld to <2 x i64>
-  store <2 x i64> %ext, <2 x i64> addrspace(1)* %out
+  store <2 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -250,9 +250,9 @@ define amdgpu_kernel void @global_sextload_v2i32_to_v2i64(ptr addrspace(1) %out,
 ; GCN-HSA: {{flat|global}}_store_dwordx4
 ; GCN-HSA: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_zextload_v4i32_to_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
+  %ld = load <4 x i32>, ptr addrspace(1) %in
   %ext = zext <4 x i32> %ld to <4 x i64>
-  store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+  store <4 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -271,9 +271,9 @@ define amdgpu_kernel void @global_zextload_v4i32_to_v4i64(ptr addrspace(1) %out,
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_sextload_v4i32_to_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
+  %ld = load <4 x i32>, ptr addrspace(1) %in
   %ext = sext <4 x i32> %ld to <4 x i64>
-  store <4 x i64> %ext, <4 x i64> addrspace(1)* %out
+  store <4 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -294,9 +294,9 @@ define amdgpu_kernel void @global_sextload_v4i32_to_v4i64(ptr addrspace(1) %out,
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_zextload_v8i32_to_v8i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <8 x i32>, <8 x i32> addrspace(1)* %in
+  %ld = load <8 x i32>, ptr addrspace(1) %in
   %ext = zext <8 x i32> %ld to <8 x i64>
-  store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+  store <8 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -326,9 +326,9 @@ define amdgpu_kernel void @global_zextload_v8i32_to_v8i64(ptr addrspace(1) %out,
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_sextload_v8i32_to_v8i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <8 x i32>, <8 x i32> addrspace(1)* %in
+  %ld = load <8 x i32>, ptr addrspace(1) %in
   %ext = sext <8 x i32> %ld to <8 x i64>
-  store <8 x i64> %ext, <8 x i64> addrspace(1)* %out
+  store <8 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -372,9 +372,9 @@ define amdgpu_kernel void @global_sextload_v8i32_to_v8i64(ptr addrspace(1) %out,
 ; GCN-NOHSA-DAG: buffer_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_sextload_v16i32_to_v16i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <16 x i32>, <16 x i32> addrspace(1)* %in
+  %ld = load <16 x i32>, ptr addrspace(1) %in
   %ext = sext <16 x i32> %ld to <16 x i64>
-  store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+  store <16 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -407,9 +407,9 @@ define amdgpu_kernel void @global_sextload_v16i32_to_v16i64(ptr addrspace(1) %ou
 ; GCN-HSA: {{flat|global}}_store_dwordx4
 ; GCN-HSA: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_zextload_v16i32_to_v16i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <16 x i32>, <16 x i32> addrspace(1)* %in
+  %ld = load <16 x i32>, ptr addrspace(1) %in
   %ext = zext <16 x i32> %ld to <16 x i64>
-  store <16 x i64> %ext, <16 x i64> addrspace(1)* %out
+  store <16 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -507,9 +507,9 @@ define amdgpu_kernel void @global_zextload_v16i32_to_v16i64(ptr addrspace(1) %ou
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 
 define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <32 x i32>, <32 x i32> addrspace(1)* %in
+  %ld = load <32 x i32>, ptr addrspace(1) %in
   %ext = sext <32 x i32> %ld to <32 x i64>
-  store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+  store <32 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -574,9 +574,9 @@ define amdgpu_kernel void @global_sextload_v32i32_to_v32i64(ptr addrspace(1) %ou
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_zextload_v32i32_to_v32i64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <32 x i32>, <32 x i32> addrspace(1)* %in
+  %ld = load <32 x i32>, ptr addrspace(1) %in
   %ext = zext <32 x i32> %ld to <32 x i64>
-  store <32 x i64> %ext, <32 x i64> addrspace(1)* %out
+  store <32 x i64> %ext, ptr addrspace(1) %out
   ret void
 }
 
@@ -642,8 +642,8 @@ define amdgpu_kernel void @global_zextload_v32i32_to_v32i64(ptr addrspace(1) %ou
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 ; GCN-HSA-DAG: {{flat|global}}_store_dwordx4
 define amdgpu_kernel void @global_load_v32i32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-  %ld = load <32 x i32>, <32 x i32> addrspace(1)* %in
-  store <32 x i32> %ld, <32 x i32> addrspace(1)* %out
+  %ld = load <32 x i32>, ptr addrspace(1) %in
+  store <32 x i32> %ld, ptr addrspace(1) %out
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll b/llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
index 559b45f00e10..d5bcf685f9f0 100644
--- a/llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
@@ -44,13 +44,13 @@ bb23:                                             ; preds = %bb23, %bb
   %tmp30 = sub i32 %tmp24, %tmp29
   %tmp31 = add i32 %tmp30, %arg16
   %tmp37 = icmp ult i32 %tmp31, %arg13
-  %tmp44 = load float, float addrspace(1)* undef, align 4
-  store float %tmp44, float addrspace(3)* undef, align 4
+  %tmp44 = load float, ptr addrspace(1) undef, align 4
+  store float %tmp44, ptr addrspace(3) undef, align 4
   %tmp47 = add i32 %tmp24, %arg2
   br i1 %tmp37, label %bb23, label %.loopexit
 }
 
-define void @lsr_order_mul24_1(i32 %arg, i32 %arg1, i32 %arg2, float addrspace(3)* nocapture %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(1)* nocapture readonly %arg10, i32 %arg11, i32 %arg12, i32 %arg13, i32 %arg14, i32 %arg15, i32 %arg16, i1 zeroext %arg17, i1 zeroext %arg18) #0 {
+define void @lsr_order_mul24_1(i32 %arg, i32 %arg1, i32 %arg2, ptr addrspace(3) nocapture %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, ptr addrspace(1) nocapture readonly %arg10, i32 %arg11, i32 %arg12, i32 %arg13, i32 %arg14, i32 %arg15, i32 %arg16, i1 zeroext %arg17, i1 zeroext %arg18) #0 {
 ; GFX9-LABEL: lsr_order_mul24_1:
 ; GFX9:       ; %bb.0: ; %bb
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -100,7 +100,7 @@ define void @lsr_order_mul24_1(i32 %arg, i32 %arg1, i32 %arg2, float addrspace(3
 ; GFX9-NEXT:    v_add_u32_e32 v6, v6, v8
 ; GFX9-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GFX9-NEXT:    s_cbranch_execnz .LBB1_2
-; GFX9-NEXT:  .LBB1_3: ; %Flow3
+; GFX9-NEXT:  .LBB1_3: ; %Flow2
 ; GFX9-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
@@ -137,11 +137,11 @@ bb23:                                             ; preds = %bb19, %bb23
   %tmp40 = and i1 %tmp39, %arg17
   %tmp41 = zext i32 %tmp35 to i64
   %tmp42 = select i1 %tmp40, i64 %tmp41, i64 0
-  %tmp43 = getelementptr inbounds float, float addrspace(1)* %arg10, i64 %tmp42
-  %tmp44 = load float, float addrspace(1)* %tmp43, align 4
+  %tmp43 = getelementptr inbounds float, ptr addrspace(1) %arg10, i64 %tmp42
+  %tmp44 = load float, ptr addrspace(1) %tmp43, align 4
   %tmp45 = select i1 %tmp40, float %tmp44, float 0.000000e+00
-  %tmp46 = getelementptr inbounds float, float addrspace(3)* %arg3, i32 %tmp36
-  store float %tmp45, float addrspace(3)* %tmp46, align 4
+  %tmp46 = getelementptr inbounds float, ptr addrspace(3) %arg3, i32 %tmp36
+  store float %tmp45, ptr addrspace(3) %tmp46, align 4
   %tmp47 = add i32 %tmp24, %arg2
   %tmp48 = icmp ult i32 %tmp47, %arg1
   br i1 %tmp48, label %bb23, label %.loopexit
@@ -170,17 +170,17 @@ define void @slsr1_0(i32 %b.arg, i32 %s.arg) #0 {
   %mul0 = mul i32 %b, %s
 ; CHECK: mul i32
 ; CHECK-NOT: mul i32
-  store volatile i32 %mul0, i32 addrspace(1)* undef
+  store volatile i32 %mul0, ptr addrspace(1) undef
 
   ; foo((b + 1) * s);
   %b1 = add i32 %b, 1
   %mul1 = mul i32 %b1, %s
-  store volatile i32 %mul1, i32 addrspace(1)* undef
+  store volatile i32 %mul1, ptr addrspace(1) undef
 
   ; foo((b + 2) * s);
   %b2 = add i32 %b, 2
   %mul2 = mul i32 %b2, %s
-  store volatile i32 %mul2, i32 addrspace(1)* undef
+  store volatile i32 %mul2, ptr addrspace(1) undef
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
index bd03b9571529..a704eec662f8 100644
--- a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
@@ -16,16 +16,16 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
   ; REGALLOC-GFX908-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3080202 /* regdef:VReg_64 */, def %23
   ; REGALLOC-GFX908-NEXT:   SI_SPILL_V64_SAVE %23, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
   ; REGALLOC-GFX908-NEXT:   [[COPY1:%[0-9]+]]:vreg_128 = COPY [[COPY]]
-  ; REGALLOC-GFX908-NEXT:   GLOBAL_STORE_DWORDX4 undef %14:vreg_64, [[COPY1]], 0, 0, implicit $exec :: (volatile store (s128) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
-  ; REGALLOC-GFX908-NEXT:   renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset.cast, addrspace 4)
+  ; REGALLOC-GFX908-NEXT:   GLOBAL_STORE_DWORDX4 undef %14:vreg_64, [[COPY1]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) undef`, addrspace 1)
+  ; REGALLOC-GFX908-NEXT:   renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset1, addrspace 4)
   ; REGALLOC-GFX908-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
   ; REGALLOC-GFX908-NEXT:   [[COPY2:%[0-9]+]]:areg_128 = COPY killed renamable $sgpr0_sgpr1_sgpr2_sgpr3
   ; REGALLOC-GFX908-NEXT:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
   ; REGALLOC-GFX908-NEXT:   [[V_MFMA_I32_4X4X4I8_e64_:%[0-9]+]]:areg_128 = V_MFMA_I32_4X4X4I8_e64 [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_1]], [[COPY2]], 0, 0, 0, implicit $mode, implicit $exec
   ; REGALLOC-GFX908-NEXT:   [[SI_SPILL_V64_RESTORE:%[0-9]+]]:vreg_64 = SI_SPILL_V64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
-  ; REGALLOC-GFX908-NEXT:   GLOBAL_STORE_DWORDX2 undef %16:vreg_64, [[SI_SPILL_V64_RESTORE]], 0, 0, implicit $exec :: (volatile store (s64) into `<2 x i32> addrspace(1)* undef`, addrspace 1)
+  ; REGALLOC-GFX908-NEXT:   GLOBAL_STORE_DWORDX2 undef %16:vreg_64, [[SI_SPILL_V64_RESTORE]], 0, 0, implicit $exec :: (volatile store (s64) into `ptr addrspace(1) undef`, addrspace 1)
   ; REGALLOC-GFX908-NEXT:   [[COPY3:%[0-9]+]]:vreg_128 = COPY [[V_MFMA_I32_4X4X4I8_e64_]]
-  ; REGALLOC-GFX908-NEXT:   GLOBAL_STORE_DWORDX4 undef %18:vreg_64, [[COPY3]], 0, 0, implicit $exec :: (volatile store (s128) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
+  ; REGALLOC-GFX908-NEXT:   GLOBAL_STORE_DWORDX4 undef %18:vreg_64, [[COPY3]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) undef`, addrspace 1)
   ; REGALLOC-GFX908-NEXT:   S_ENDPGM 0
   ; PEI-GFX908-LABEL: name: partial_copy
   ; PEI-GFX908: bb.0 (%ir-block.0):
@@ -41,17 +41,17 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
   ; PEI-GFX908-NEXT:   BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, 0, 4, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1 :: (store (s32) into %stack.0, addrspace 5)
   ; PEI-GFX908-NEXT:   $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit killed $vgpr0_vgpr1
   ; PEI-GFX908-NEXT:   renamable $vgpr0_vgpr1_vgpr2_vgpr3 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3, implicit $exec
-  ; PEI-GFX908-NEXT:   GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, 0, 0, implicit $exec :: (volatile store (s128) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
-  ; PEI-GFX908-NEXT:   renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset.cast, addrspace 4)
+  ; PEI-GFX908-NEXT:   GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) undef`, addrspace 1)
+  ; PEI-GFX908-NEXT:   renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset1, addrspace 4)
   ; PEI-GFX908-NEXT:   renamable $vgpr0 = V_MOV_B32_e32 1, implicit $exec
   ; PEI-GFX908-NEXT:   renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec
   ; PEI-GFX908-NEXT:   renamable $vgpr1 = V_MOV_B32_e32 2, implicit $exec
   ; PEI-GFX908-NEXT:   renamable $agpr0_agpr1_agpr2_agpr3 = V_MFMA_I32_4X4X4I8_e64 killed $vgpr0, killed $vgpr1, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
   ; PEI-GFX908-NEXT:   $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr8_sgpr9_sgpr10_sgpr11, 0, 4, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1 :: (load (s32) from %stack.0, addrspace 5)
   ; PEI-GFX908-NEXT:   $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr4, implicit $exec, implicit-def $vgpr0_vgpr1
-  ; PEI-GFX908-NEXT:   GLOBAL_STORE_DWORDX2 undef renamable $vgpr0_vgpr1, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (volatile store (s64) into `<2 x i32> addrspace(1)* undef`, addrspace 1)
+  ; PEI-GFX908-NEXT:   GLOBAL_STORE_DWORDX2 undef renamable $vgpr0_vgpr1, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (volatile store (s64) into `ptr addrspace(1) undef`, addrspace 1)
   ; PEI-GFX908-NEXT:   renamable $vgpr0_vgpr1_vgpr2_vgpr3 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3, implicit $exec
-  ; PEI-GFX908-NEXT:   GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, 0, 0, implicit $exec :: (volatile store (s128) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
+  ; PEI-GFX908-NEXT:   GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) undef`, addrspace 1)
   ; PEI-GFX908-NEXT:   S_ENDPGM 0
   ; REGALLOC-GFX90A-LABEL: name: partial_copy
   ; REGALLOC-GFX90A: bb.0 (%ir-block.0):
@@ -62,15 +62,15 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
   ; REGALLOC-GFX90A-NEXT:   [[COPY:%[0-9]+]]:av_128_align2 = COPY %25
   ; REGALLOC-GFX90A-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3407882 /* regdef:VReg_64_Align2 */, def %23
   ; REGALLOC-GFX90A-NEXT:   SI_SPILL_V64_SAVE %23, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
-  ; REGALLOC-GFX90A-NEXT:   GLOBAL_STORE_DWORDX4 undef %14:vreg_64_align2, [[COPY]], 0, 0, implicit $exec :: (volatile store (s128) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
-  ; REGALLOC-GFX90A-NEXT:   renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset.cast, addrspace 4)
+  ; REGALLOC-GFX90A-NEXT:   GLOBAL_STORE_DWORDX4 undef %14:vreg_64_align2, [[COPY]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) undef`, addrspace 1)
+  ; REGALLOC-GFX90A-NEXT:   renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset1, addrspace 4)
   ; REGALLOC-GFX90A-NEXT:   [[COPY1:%[0-9]+]]:areg_128_align2 = COPY killed renamable $sgpr0_sgpr1_sgpr2_sgpr3
   ; REGALLOC-GFX90A-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
   ; REGALLOC-GFX90A-NEXT:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
   ; REGALLOC-GFX90A-NEXT:   [[V_MFMA_I32_4X4X4I8_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_I32_4X4X4I8_e64 [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_1]], [[COPY1]], 0, 0, 0, implicit $mode, implicit $exec
   ; REGALLOC-GFX90A-NEXT:   [[SI_SPILL_AV64_RESTORE:%[0-9]+]]:av_64_align2 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
-  ; REGALLOC-GFX90A-NEXT:   GLOBAL_STORE_DWORDX2 undef %16:vreg_64_align2, [[SI_SPILL_AV64_RESTORE]], 0, 0, implicit $exec :: (volatile store (s64) into `<2 x i32> addrspace(1)* undef`, addrspace 1)
-  ; REGALLOC-GFX90A-NEXT:   GLOBAL_STORE_DWORDX4 undef %18:vreg_64_align2, [[V_MFMA_I32_4X4X4I8_e64_]], 0, 0, implicit $exec :: (volatile store (s128) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
+  ; REGALLOC-GFX90A-NEXT:   GLOBAL_STORE_DWORDX2 undef %16:vreg_64_align2, [[SI_SPILL_AV64_RESTORE]], 0, 0, implicit $exec :: (volatile store (s64) into `ptr addrspace(1) undef`, addrspace 1)
+  ; REGALLOC-GFX90A-NEXT:   GLOBAL_STORE_DWORDX4 undef %18:vreg_64_align2, [[V_MFMA_I32_4X4X4I8_e64_]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) undef`, addrspace 1)
   ; REGALLOC-GFX90A-NEXT:   S_ENDPGM 0
   ; PEI-GFX90A-LABEL: name: partial_copy
   ; PEI-GFX90A: bb.0 (%ir-block.0):
@@ -85,24 +85,24 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
   ; PEI-GFX90A-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3407882 /* regdef:VReg_64_Align2 */, def renamable $vgpr0_vgpr1
   ; PEI-GFX90A-NEXT:   BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, 0, 4, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $vgpr0_vgpr1 :: (store (s32) into %stack.0, addrspace 5)
   ; PEI-GFX90A-NEXT:   $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit killed $vgpr0_vgpr1
-  ; PEI-GFX90A-NEXT:   GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $agpr0_agpr1_agpr2_agpr3, 0, 0, implicit $exec :: (volatile store (s128) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
-  ; PEI-GFX90A-NEXT:   renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset.cast, addrspace 4)
+  ; PEI-GFX90A-NEXT:   GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $agpr0_agpr1_agpr2_agpr3, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) undef`, addrspace 1)
+  ; PEI-GFX90A-NEXT:   renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset1, addrspace 4)
   ; PEI-GFX90A-NEXT:   renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec
   ; PEI-GFX90A-NEXT:   renamable $vgpr0 = V_MOV_B32_e32 1, implicit $exec
   ; PEI-GFX90A-NEXT:   renamable $vgpr1 = V_MOV_B32_e32 2, implicit $exec
   ; PEI-GFX90A-NEXT:   renamable $agpr0_agpr1_agpr2_agpr3 = V_MFMA_I32_4X4X4I8_e64 killed $vgpr0, killed $vgpr1, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
   ; PEI-GFX90A-NEXT:   $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr8_sgpr9_sgpr10_sgpr11, 0, 4, 0, 0, implicit $exec, implicit-def $vgpr0_vgpr1 :: (load (s32) from %stack.0, addrspace 5)
   ; PEI-GFX90A-NEXT:   $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr4, implicit $exec, implicit-def $vgpr0_vgpr1
-  ; PEI-GFX90A-NEXT:   GLOBAL_STORE_DWORDX2 undef renamable $vgpr0_vgpr1, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (volatile store (s64) into `<2 x i32> addrspace(1)* undef`, addrspace 1)
-  ; PEI-GFX90A-NEXT:   GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $agpr0_agpr1_agpr2_agpr3, 0, 0, implicit $exec :: (volatile store (s128) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
+  ; PEI-GFX90A-NEXT:   GLOBAL_STORE_DWORDX2 undef renamable $vgpr0_vgpr1, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (volatile store (s64) into `ptr addrspace(1) undef`, addrspace 1)
+  ; PEI-GFX90A-NEXT:   GLOBAL_STORE_DWORDX4 undef renamable $vgpr0_vgpr1, killed renamable $agpr0_agpr1_agpr2_agpr3, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) undef`, addrspace 1)
   ; PEI-GFX90A-NEXT:   S_ENDPGM 0
   call void asm sideeffect "; use $0", "a" (i32 undef)
   %v0 = call <4 x i32> asm sideeffect "; def $0", "=v" ()
   %v1 = call <2 x i32> asm sideeffect "; def $0", "=v" ()
   %mai = tail call <4 x i32> @llvm.amdgcn.mfma.i32.4x4x4i8(i32 1, i32 2, <4 x i32> %arg, i32 0, i32 0, i32 0)
-  store volatile <4 x i32> %v0, <4 x i32> addrspace(1)* undef
-  store volatile <2 x i32> %v1, <2 x i32> addrspace(1)* undef
-  store volatile <4 x i32> %mai, <4 x i32> addrspace(1)* undef
+  store volatile <4 x i32> %v0, ptr addrspace(1) undef
+  store volatile <2 x i32> %v1, ptr addrspace(1) undef
+  store volatile <4 x i32> %mai, ptr addrspace(1) undef
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-addrspacecast.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-addrspacecast.ll
index 7f137d5ab751..8a467812ec48 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-alloca-addrspacecast.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-addrspacecast.ll
@@ -3,17 +3,15 @@
 ; The types of the users of the addrspacecast should not be changed.
 
 ; CHECK-LABEL: @invalid_bitcast_addrspace(
-; CHECK: getelementptr inbounds [256 x [1 x i32]], [256 x [1 x i32]] addrspace(3)* @invalid_bitcast_addrspace.data, i32 0, i32 %14
-; CHECK: bitcast [1 x i32] addrspace(3)* %{{[0-9]+}} to half addrspace(3)*
-; CHECK: addrspacecast half addrspace(3)* %tmp to half*
-; CHECK: bitcast half* %tmp1 to <2 x i16>*
+; CHECK: [[GEP:%[0-9]+]] = getelementptr inbounds [256 x [1 x i32]], ptr addrspace(3) @invalid_bitcast_addrspace.data, i32 0, i32 %{{[0-9]+}}
+; CHECK: [[ASC:%[a-z0-9]+]] = addrspacecast ptr addrspace(3) [[GEP]] to ptr
+; CHECK: [[LOAD:%[a-z0-9]+]] = load <2 x i16>, ptr [[ASC]]
+; CHECK: bitcast <2 x i16> [[LOAD]] to <2 x half>
 define amdgpu_kernel void @invalid_bitcast_addrspace() #0 {
 entry:
   %data = alloca [1 x i32], addrspace(5)
-  %tmp = bitcast [1 x i32] addrspace(5)* %data to half addrspace(5)*
-  %tmp1 = addrspacecast half addrspace(5)* %tmp to half*
-  %tmp2 = bitcast half* %tmp1 to <2 x i16>*
-  %tmp3 = load <2 x i16>, <2 x i16>* %tmp2, align 2
+  %tmp1 = addrspacecast ptr addrspace(5) %data to ptr
+  %tmp3 = load <2 x i16>, ptr %tmp1, align 2
   %tmp4 = bitcast <2 x i16> %tmp3 to <2 x half>
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index 9ca9f80a5eaf..a46abf6770bb 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -7,7 +7,7 @@
 
 declare i64 @_Z13get_global_idj(i32) #0
 
-define amdgpu_kernel void @clmem_read_simplified(i8 addrspace(1)*  %buffer) {
+define amdgpu_kernel void @clmem_read_simplified(ptr addrspace(1)  %buffer) {
 ; GFX8-LABEL: clmem_read_simplified:
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_mov_b32 s36, SCRATCH_RSRC_DWORD0
@@ -381,41 +381,40 @@ entry:
   %conv = and i64 %call, 255
   %a0 = shl i64 %call, 7
   %idx.ext11 = and i64 %a0, 4294934528
-  %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
-  %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
+  %add.ptr12 = getelementptr inbounds i8, ptr addrspace(1) %buffer, i64 %idx.ext11
 
-  %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 %conv
-  %load1 = load i64, i64 addrspace(1)* %addr1, align 8
-  %addr2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 256
-  %load2 = load i64, i64 addrspace(1)* %addr2, align 8
+  %addr1 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr12, i64 %conv
+  %load1 = load i64, ptr addrspace(1) %addr1, align 8
+  %addr2 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 256
+  %load2 = load i64, ptr addrspace(1) %addr2, align 8
   %add.1 = add i64 %load2, %load1
 
-  %add.ptr8.2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 512
-  %load3 = load i64, i64 addrspace(1)* %add.ptr8.2, align 8
+  %add.ptr8.2 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 512
+  %load3 = load i64, ptr addrspace(1) %add.ptr8.2, align 8
   %add.2 = add i64 %load3, %add.1
-  %add.ptr8.3 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 768
-  %load4 = load i64, i64 addrspace(1)* %add.ptr8.3, align 8
+  %add.ptr8.3 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 768
+  %load4 = load i64, ptr addrspace(1) %add.ptr8.3, align 8
   %add.3 = add i64 %load4, %add.2
 
-  %add.ptr8.4 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1024
-  %load5 = load i64, i64 addrspace(1)* %add.ptr8.4, align 8
+  %add.ptr8.4 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 1024
+  %load5 = load i64, ptr addrspace(1) %add.ptr8.4, align 8
   %add.4 = add i64 %load5, %add.3
-  %add.ptr8.5 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1280
-  %load6 = load i64, i64 addrspace(1)* %add.ptr8.5, align 8
+  %add.ptr8.5 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 1280
+  %load6 = load i64, ptr addrspace(1) %add.ptr8.5, align 8
   %add.5 = add i64 %load6, %add.4
 
-  %add.ptr8.6 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1536
-  %load7 = load i64, i64 addrspace(1)* %add.ptr8.6, align 8
+  %add.ptr8.6 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 1536
+  %load7 = load i64, ptr addrspace(1) %add.ptr8.6, align 8
   %add.6 = add i64 %load7, %add.5
-  %add.ptr8.7 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1792
-  %load8 = load i64, i64 addrspace(1)* %add.ptr8.7, align 8
+  %add.ptr8.7 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 1792
+  %load8 = load i64, ptr addrspace(1) %add.ptr8.7, align 8
   %add.7 = add i64 %load8, %add.6
 
-  store i64 %add.7, i64 addrspace(1)* %saddr, align 8
+  store i64 %add.7, ptr addrspace(1) %add.ptr12, align 8
   ret void
 }
 
-define hidden amdgpu_kernel void @clmem_read(i8 addrspace(1)*  %buffer) {
+define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX8-LABEL: clmem_read:
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_mov_b32 s36, SCRATCH_RSRC_DWORD0
@@ -1033,9 +1032,8 @@ entry:
   %conv = and i64 %call, 255
   %a0 = shl i64 %call, 17
   %idx.ext11 = and i64 %a0, 4261412864
-  %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
-  %a1 = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
-  %add.ptr6 = getelementptr inbounds i64, i64 addrspace(1)* %a1, i64 %conv
+  %add.ptr12 = getelementptr inbounds i8, ptr addrspace(1) %buffer, i64 %idx.ext11
+  %add.ptr6 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr12, i64 %conv
   br label %for.cond.preheader
 
 while.cond.loopexit:                              ; preds = %for.body
@@ -1052,68 +1050,68 @@ for.body:                                         ; preds = %for.body, %for.cond
   %block.029 = phi i32 [ 0, %for.cond.preheader ], [ %add9.31, %for.body ]
   %sum.128 = phi i64 [ %sum.030, %for.cond.preheader ], [ %add.10, %for.body ]
   %conv3 = zext i32 %block.029 to i64
-  %add.ptr8 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3
-  %load1 = load i64, i64 addrspace(1)* %add.ptr8, align 8
+  %add.ptr8 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3
+  %load1 = load i64, ptr addrspace(1) %add.ptr8, align 8
   %add = add i64 %load1, %sum.128
 
   %add9 = or i32 %block.029, 256
   %conv3.1 = zext i32 %add9 to i64
-  %add.ptr8.1 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.1
-  %load2 = load i64, i64 addrspace(1)* %add.ptr8.1, align 8
+  %add.ptr8.1 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.1
+  %load2 = load i64, ptr addrspace(1) %add.ptr8.1, align 8
   %add.1 = add i64 %load2, %add
 
   %add9.1 = or i32 %block.029, 512
   %conv3.2 = zext i32 %add9.1 to i64
-  %add.ptr8.2 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.2
-  %l3 = load i64, i64 addrspace(1)* %add.ptr8.2, align 8
+  %add.ptr8.2 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.2
+  %l3 = load i64, ptr addrspace(1) %add.ptr8.2, align 8
   %add.2 = add i64 %l3, %add.1
 
   %add9.2 = or i32 %block.029, 768
   %conv3.3 = zext i32 %add9.2 to i64
-  %add.ptr8.3 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.3
-  %l4 = load i64, i64 addrspace(1)* %add.ptr8.3, align 8
+  %add.ptr8.3 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.3
+  %l4 = load i64, ptr addrspace(1) %add.ptr8.3, align 8
   %add.3 = add i64 %l4, %add.2
 
   %add9.3 = or i32 %block.029, 1024
   %conv3.4 = zext i32 %add9.3 to i64
-  %add.ptr8.4 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.4
-  %l5 = load i64, i64 addrspace(1)* %add.ptr8.4, align 8
+  %add.ptr8.4 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.4
+  %l5 = load i64, ptr addrspace(1) %add.ptr8.4, align 8
   %add.4 = add i64 %l5, %add.3
 
   %add9.4 = or i32 %block.029, 1280
   %conv3.5 = zext i32 %add9.4 to i64
-  %add.ptr8.5 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.5
-  %l6 = load i64, i64 addrspace(1)* %add.ptr8.5, align 8
+  %add.ptr8.5 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.5
+  %l6 = load i64, ptr addrspace(1) %add.ptr8.5, align 8
   %add.5 = add i64 %l6, %add.4
 
   %add9.5 = or i32 %block.029, 1536
   %conv3.6 = zext i32 %add9.5 to i64
-  %add.ptr8.6 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.6
-  %load7 = load i64, i64 addrspace(1)* %add.ptr8.6, align 8
+  %add.ptr8.6 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.6
+  %load7 = load i64, ptr addrspace(1) %add.ptr8.6, align 8
   %add.6 = add i64 %load7, %add.5
 
   %add9.6 = or i32 %block.029, 1792
   %conv3.7 = zext i32 %add9.6 to i64
-  %add.ptr8.7 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.7
-  %load8 = load i64, i64 addrspace(1)* %add.ptr8.7, align 8
+  %add.ptr8.7 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.7
+  %load8 = load i64, ptr addrspace(1) %add.ptr8.7, align 8
   %add.7 = add i64 %load8, %add.6
 
   %add9.7 = or i32 %block.029, 2048
   %conv3.8 = zext i32 %add9.7 to i64
-  %add.ptr8.8 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.8
-  %load9 = load i64, i64 addrspace(1)* %add.ptr8.8, align 8
+  %add.ptr8.8 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.8
+  %load9 = load i64, ptr addrspace(1) %add.ptr8.8, align 8
   %add.8 = add i64 %load9, %add.7
 
   %add9.8 = or i32 %block.029, 2304
   %conv3.9 = zext i32 %add9.8 to i64
-  %add.ptr8.9 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.9
-  %load10 = load i64, i64 addrspace(1)* %add.ptr8.9, align 8
+  %add.ptr8.9 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.9
+  %load10 = load i64, ptr addrspace(1) %add.ptr8.9, align 8
   %add.9 = add i64 %load10, %add.8
 
   %add9.9 = or i32 %block.029, 2560
   %conv3.10 = zext i32 %add9.9 to i64
-  %add.ptr8.10 = getelementptr inbounds i64, i64 addrspace(1)* %add.ptr6, i64 %conv3.10
-  %load11 = load i64, i64 addrspace(1)* %add.ptr8.10, align 8
+  %add.ptr8.10 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr6, i64 %conv3.10
+  %load11 = load i64, ptr addrspace(1) %add.ptr8.10, align 8
   %add.10 = add i64 %load11, %add.9
 
   %add9.31 = add nuw nsw i32 %block.029, 8192
@@ -1121,12 +1119,12 @@ for.body:                                         ; preds = %for.body, %for.cond
   br i1 %cmp.31, label %for.body, label %while.cond.loopexit
 
 while.end:                                        ; preds = %while.cond.loopexit
-  store i64 %add.10, i64 addrspace(1)* %a1, align 8
+  store i64 %add.10, ptr addrspace(1) %add.ptr12, align 8
   ret void
 }
 
 ; using 32bit address.
-define amdgpu_kernel void @Address32(i8 addrspace(1)* %buffer) {
+define amdgpu_kernel void @Address32(ptr addrspace(1) %buffer) {
 ; GFX8-LABEL: Address32:
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_mov_b32 s36, SCRATCH_RSRC_DWORD0
@@ -1458,53 +1456,52 @@ entry:
    %conv = and i64 %call, 255
    %id = shl i64 %call, 7
    %idx.ext11 = and i64 %id, 4294934528
-   %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
-   %addr = bitcast i8 addrspace(1)* %add.ptr12 to i32 addrspace(1)*
+   %add.ptr12 = getelementptr inbounds i8, ptr addrspace(1) %buffer, i64 %idx.ext11
 
-   %add.ptr6 = getelementptr inbounds i32, i32 addrspace(1)* %addr, i64 %conv
-   %load1 = load i32, i32 addrspace(1)* %add.ptr6, align 4
+   %add.ptr6 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr12, i64 %conv
+   %load1 = load i32, ptr addrspace(1) %add.ptr6, align 4
 
-   %add.ptr8.1 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 256
-   %load2 = load i32, i32 addrspace(1)* %add.ptr8.1, align 4
+   %add.ptr8.1 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr6, i64 256
+   %load2 = load i32, ptr addrspace(1) %add.ptr8.1, align 4
    %add.1 = add i32 %load2, %load1
 
-   %add.ptr8.2 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 512
-   %load3 = load i32, i32 addrspace(1)* %add.ptr8.2, align 4
+   %add.ptr8.2 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr6, i64 512
+   %load3 = load i32, ptr addrspace(1) %add.ptr8.2, align 4
    %add.2 = add i32 %load3, %add.1
 
-   %add.ptr8.3 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 768
-   %load4 = load i32, i32 addrspace(1)* %add.ptr8.3, align 4
+   %add.ptr8.3 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr6, i64 768
+   %load4 = load i32, ptr addrspace(1) %add.ptr8.3, align 4
    %add.3 = add i32 %load4, %add.2
 
-   %add.ptr8.4 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 1024
-   %load5 = load i32, i32 addrspace(1)* %add.ptr8.4, align 4
+   %add.ptr8.4 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr6, i64 1024
+   %load5 = load i32, ptr addrspace(1) %add.ptr8.4, align 4
    %add.4 = add i32 %load5, %add.3
 
-   %add.ptr8.5 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 1280
-   %load6 = load i32, i32 addrspace(1)* %add.ptr8.5, align 4
+   %add.ptr8.5 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr6, i64 1280
+   %load6 = load i32, ptr addrspace(1) %add.ptr8.5, align 4
    %add.5 = add i32 %load6, %add.4
 
-   %add.ptr8.6 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 1536
-   %load7 = load i32, i32 addrspace(1)* %add.ptr8.6, align 4
+   %add.ptr8.6 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr6, i64 1536
+   %load7 = load i32, ptr addrspace(1) %add.ptr8.6, align 4
    %add.6 = add i32 %load7, %add.5
 
-   %add.ptr8.7 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 1792
-   %load8 = load i32, i32 addrspace(1)* %add.ptr8.7, align 4
+   %add.ptr8.7 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr6, i64 1792
+   %load8 = load i32, ptr addrspace(1) %add.ptr8.7, align 4
    %add.7 = add i32 %load8, %add.6
 
-   %add.ptr8.8 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 2048
-   %load9 = load i32, i32 addrspace(1)* %add.ptr8.8, align 4
+   %add.ptr8.8 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr6, i64 2048
+   %load9 = load i32, ptr addrspace(1) %add.ptr8.8, align 4
    %add.8 = add i32 %load9, %add.7
 
-   %add.ptr8.9 = getelementptr inbounds i32, i32 addrspace(1)* %add.ptr6, i64 2304
-   %load10 = load i32, i32 addrspace(1)* %add.ptr8.9, align 4
+   %add.ptr8.9 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr6, i64 2304
+   %load10 = load i32, ptr addrspace(1) %add.ptr8.9, align 4
    %add.9 = add i32 %load10, %add.8
 
-   store i32 %add.9, i32 addrspace(1)* %addr, align 4
+   store i32 %add.9, ptr addrspace(1) %add.ptr12, align 4
    ret void
 }
 
-define amdgpu_kernel void @Offset64(i8 addrspace(1)*  %buffer) {
+define amdgpu_kernel void @Offset64(ptr addrspace(1)  %buffer) {
 ; GFX8-LABEL: Offset64:
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_mov_b32 s36, SCRATCH_RSRC_DWORD0
@@ -1760,32 +1757,31 @@ entry:
   %conv = and i64 %call, 255
   %a0 = shl i64 %call, 7
   %idx.ext11 = and i64 %a0, 4294934528
-  %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
-  %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
+  %add.ptr12 = getelementptr inbounds i8, ptr addrspace(1) %buffer, i64 %idx.ext11
 
-  %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 %conv
-  %load1 = load i64, i64 addrspace(1)* %addr1, align 8
+  %addr1 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr12, i64 %conv
+  %load1 = load i64, ptr addrspace(1) %addr1, align 8
 
-  %addr2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 536870400
-  %load2 = load i64, i64 addrspace(1)* %addr2, align 8
+  %addr2 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 536870400
+  %load2 = load i64, ptr addrspace(1) %addr2, align 8
 
   %add1 = add i64 %load2, %load1
 
-  %addr3 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 536870656
-  %load3 = load i64, i64 addrspace(1)* %addr3, align 8
+  %addr3 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 536870656
+  %load3 = load i64, ptr addrspace(1) %addr3, align 8
 
   %add2 = add i64 %load3, %add1
 
-  %addr4 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 536870912
-  %load4 = load i64, i64 addrspace(1)* %addr4, align 8
+  %addr4 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 536870912
+  %load4 = load i64, ptr addrspace(1) %addr4, align 8
   %add4 = add i64 %load4, %add2
 
-  store i64 %add4, i64 addrspace(1)* %saddr, align 8
+  store i64 %add4, ptr addrspace(1) %add.ptr12, align 8
   ret void
 }
 
 ; TODO: Support load4 as anchor instruction.
-define amdgpu_kernel void @p32Offset64(i8 addrspace(1)*  %buffer) {
+define amdgpu_kernel void @p32Offset64(ptr addrspace(1)  %buffer) {
 ; GFX8-LABEL: p32Offset64:
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_mov_b32 s36, SCRATCH_RSRC_DWORD0
@@ -2017,31 +2013,30 @@ entry:
   %conv = and i64 %call, 255
   %a0 = shl i64 %call, 7
   %idx.ext11 = and i64 %a0, 4294934528
-  %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
-  %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i32 addrspace(1)*
+  %add.ptr12 = getelementptr inbounds i8, ptr addrspace(1) %buffer, i64 %idx.ext11
 
-  %addr1 = getelementptr inbounds i32, i32 addrspace(1)* %saddr, i64 %conv
-  %load1 = load i32, i32 addrspace(1)* %addr1, align 8
+  %addr1 = getelementptr inbounds i32, ptr addrspace(1) %add.ptr12, i64 %conv
+  %load1 = load i32, ptr addrspace(1) %addr1, align 8
 
-  %addr2 = getelementptr inbounds i32, i32 addrspace(1)* %addr1, i64 536870400
-  %load2 = load i32, i32 addrspace(1)* %addr2, align 8
+  %addr2 = getelementptr inbounds i32, ptr addrspace(1) %addr1, i64 536870400
+  %load2 = load i32, ptr addrspace(1) %addr2, align 8
 
   %add1 = add i32 %load2, %load1
 
-  %addr3 = getelementptr inbounds i32, i32 addrspace(1)* %addr1, i64 536870656
-  %load3 = load i32, i32 addrspace(1)* %addr3, align 8
+  %addr3 = getelementptr inbounds i32, ptr addrspace(1) %addr1, i64 536870656
+  %load3 = load i32, ptr addrspace(1) %addr3, align 8
 
   %add2 = add i32 %load3, %add1
 
-  %addr4 = getelementptr inbounds i32, i32 addrspace(1)* %addr1, i64 536870912
-  %load4 = load i32, i32 addrspace(1)* %addr4, align 8
+  %addr4 = getelementptr inbounds i32, ptr addrspace(1) %addr1, i64 536870912
+  %load4 = load i32, ptr addrspace(1) %addr4, align 8
   %add4 = add i32 %load4, %add2
 
-  store i32 %add4, i32 addrspace(1)* %saddr, align 8
+  store i32 %add4, ptr addrspace(1) %add.ptr12, align 8
   ret void
 }
 
-define amdgpu_kernel void @DiffBase(i8 addrspace(1)* %buffer1,
+define amdgpu_kernel void @DiffBase(ptr addrspace(1) %buffer1,
 ; GFX8-LABEL: DiffBase:
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -2282,45 +2277,43 @@ define amdgpu_kernel void @DiffBase(i8 addrspace(1)* %buffer1,
 ; GFX11-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX11-NEXT:    s_endpgm
-                                    i8 addrspace(1)* %buffer2) {
+                                    ptr addrspace(1) %buffer2) {
 entry:
   %call = tail call i64 @_Z13get_global_idj(i32 0)
   %conv = and i64 %call, 255
   %a0 = shl i64 %call, 7
   %idx.ext11 = and i64 %a0, 4294934528
-  %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer1, i64 %idx.ext11
-  %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
+  %add.ptr12 = getelementptr inbounds i8, ptr addrspace(1) %buffer1, i64 %idx.ext11
 
-  %add.ptr2 = getelementptr inbounds i8, i8 addrspace(1)* %buffer2, i64 %idx.ext11
-  %saddr2 = bitcast i8 addrspace(1)* %add.ptr2 to i64 addrspace(1)*
+  %add.ptr2 = getelementptr inbounds i8, ptr addrspace(1) %buffer2, i64 %idx.ext11
 
-  %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 512
-  %load1 = load i64, i64 addrspace(1)* %addr1, align 8
-  %add.ptr8.3 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 768
-  %load2 = load i64, i64 addrspace(1)* %add.ptr8.3, align 8
+  %addr1 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr12, i64 512
+  %load1 = load i64, ptr addrspace(1) %addr1, align 8
+  %add.ptr8.3 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr12, i64 768
+  %load2 = load i64, ptr addrspace(1) %add.ptr8.3, align 8
   %add1 = add i64 %load2, %load1
-  %add.ptr8.4 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 1024
-  %load3 = load i64, i64 addrspace(1)* %add.ptr8.4, align 8
+  %add.ptr8.4 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr12, i64 1024
+  %load3 = load i64, ptr addrspace(1) %add.ptr8.4, align 8
   %add2 = add i64 %load3, %add1
 
-  %add.ptr8.5 = getelementptr inbounds i64, i64 addrspace(1)* %saddr2, i64 1280
-  %load4 = load i64, i64 addrspace(1)* %add.ptr8.5, align 8
+  %add.ptr8.5 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr2, i64 1280
+  %load4 = load i64, ptr addrspace(1) %add.ptr8.5, align 8
 
-  %add.ptr8.6 = getelementptr inbounds i64, i64 addrspace(1)* %saddr2, i64 1536
-  %load5 = load i64, i64 addrspace(1)* %add.ptr8.6, align 8
+  %add.ptr8.6 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr2, i64 1536
+  %load5 = load i64, ptr addrspace(1) %add.ptr8.6, align 8
   %add3 = add i64 %load5, %load4
 
-  %add.ptr8.7 = getelementptr inbounds i64, i64 addrspace(1)* %saddr2, i64 1792
-  %load6 = load i64, i64 addrspace(1)* %add.ptr8.7, align 8
+  %add.ptr8.7 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr2, i64 1792
+  %load6 = load i64, ptr addrspace(1) %add.ptr8.7, align 8
   %add4 = add i64 %load6, %add3
 
   %add5 = add i64 %add2, %add4
 
-  store i64 %add5, i64 addrspace(1)* %saddr, align 8
+  store i64 %add5, ptr addrspace(1) %add.ptr12, align 8
   ret void
 }
 
-define amdgpu_kernel void @ReverseOrder(i8 addrspace(1)* %buffer) {
+define amdgpu_kernel void @ReverseOrder(ptr addrspace(1) %buffer) {
 ; GFX8-LABEL: ReverseOrder:
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_mov_b32 s36, SCRATCH_RSRC_DWORD0
@@ -2693,45 +2686,44 @@ entry:
   %conv = and i64 %call, 255
   %a0 = shl i64 %call, 7
   %idx.ext11 = and i64 %a0, 4294934528
-  %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
-  %saddr = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
+  %add.ptr12 = getelementptr inbounds i8, ptr addrspace(1) %buffer, i64 %idx.ext11
 
-  %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %saddr, i64 %conv
-  %load1 = load i64, i64 addrspace(1)* %addr1, align 8
+  %addr1 = getelementptr inbounds i64, ptr addrspace(1) %add.ptr12, i64 %conv
+  %load1 = load i64, ptr addrspace(1) %addr1, align 8
 
-  %add.ptr8.7 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1792
-  %load8 = load i64, i64 addrspace(1)* %add.ptr8.7, align 8
+  %add.ptr8.7 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 1792
+  %load8 = load i64, ptr addrspace(1) %add.ptr8.7, align 8
   %add7 = add i64 %load8, %load1
 
-  %add.ptr8.6 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1536
-  %load7 = load i64, i64 addrspace(1)* %add.ptr8.6, align 8
+  %add.ptr8.6 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 1536
+  %load7 = load i64, ptr addrspace(1) %add.ptr8.6, align 8
   %add6 = add i64 %load7, %add7
 
-  %add.ptr8.5 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1280
-  %load6 = load i64, i64 addrspace(1)* %add.ptr8.5, align 8
+  %add.ptr8.5 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 1280
+  %load6 = load i64, ptr addrspace(1) %add.ptr8.5, align 8
   %add5 = add i64 %load6, %add6
 
-  %add.ptr8.4 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 1024
-  %load5 = load i64, i64 addrspace(1)* %add.ptr8.4, align 8
+  %add.ptr8.4 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 1024
+  %load5 = load i64, ptr addrspace(1) %add.ptr8.4, align 8
   %add4 = add i64 %load5, %add5
 
-  %add.ptr8.3 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 768
-  %load4 = load i64, i64 addrspace(1)* %add.ptr8.3, align 8
+  %add.ptr8.3 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 768
+  %load4 = load i64, ptr addrspace(1) %add.ptr8.3, align 8
   %add3 = add i64 %load4, %add4
 
-  %add.ptr8.2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 512
-  %load3 = load i64, i64 addrspace(1)* %add.ptr8.2, align 8
+  %add.ptr8.2 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 512
+  %load3 = load i64, ptr addrspace(1) %add.ptr8.2, align 8
   %add2 = add i64 %load3, %add3
 
-  %addr2 = getelementptr inbounds i64, i64 addrspace(1)* %addr1, i64 256
-  %load2 = load i64, i64 addrspace(1)* %addr2, align 8
+  %addr2 = getelementptr inbounds i64, ptr addrspace(1) %addr1, i64 256
+  %load2 = load i64, ptr addrspace(1) %addr2, align 8
   %add1 = add i64 %load2, %add2
 
-  store i64 %add1, i64 addrspace(1)* %saddr, align 8
+  store i64 %add1, ptr addrspace(1) %add.ptr12, align 8
   ret void
 }
 
-define hidden amdgpu_kernel void @negativeoffset(i8 addrspace(1)* nocapture %buffer) {
+define hidden amdgpu_kernel void @negativeoffset(ptr addrspace(1) nocapture %buffer) {
 ; GFX8-LABEL: negativeoffset:
 ; GFX8:       ; %bb.0: ; %entry
 ; GFX8-NEXT:    s_mov_b32 s36, SCRATCH_RSRC_DWORD0
@@ -2944,21 +2936,20 @@ entry:
   %conv = and i64 %call, 255
   %0 = shl i64 %call, 7
   %idx.ext11 = and i64 %0, 4294934528
-  %add.ptr12 = getelementptr inbounds i8, i8 addrspace(1)* %buffer, i64 %idx.ext11
-  %buffer_head = bitcast i8 addrspace(1)* %add.ptr12 to i64 addrspace(1)*
+  %add.ptr12 = getelementptr inbounds i8, ptr addrspace(1) %buffer, i64 %idx.ext11
 
-  %buffer_wave = getelementptr inbounds i64, i64 addrspace(1)* %buffer_head, i64 %conv
+  %buffer_wave = getelementptr inbounds i64, ptr addrspace(1) %add.ptr12, i64 %conv
 
-  %addr1 = getelementptr inbounds i64, i64 addrspace(1)* %buffer_wave, i64 -536870656
-  %load1 = load i64, i64 addrspace(1)* %addr1, align 8
+  %addr1 = getelementptr inbounds i64, ptr addrspace(1) %buffer_wave, i64 -536870656
+  %load1 = load i64, ptr addrspace(1) %addr1, align 8
 
-  %addr2 = getelementptr inbounds i64, i64 addrspace(1)* %buffer_wave, i64 -536870912
-  %load2 = load i64, i64 addrspace(1)* %addr2, align 8
+  %addr2 = getelementptr inbounds i64, ptr addrspace(1) %buffer_wave, i64 -536870912
+  %load2 = load i64, ptr addrspace(1) %addr2, align 8
 
 
   %add = add i64 %load2, %load1
 
-  store i64 %add, i64 addrspace(1)* %buffer_head, align 8
+  store i64 %add, ptr addrspace(1) %add.ptr12, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
index d2ae6cf60681..67b5ae74e943 100644
--- a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
@@ -156,23 +156,23 @@ define amdgpu_kernel void @kernel_64_256() #7 {
 ; CHECK-NEXT:    call void @merge_cycle_0()
 ; CHECK-NEXT:    call void @default_captured_address()
 ; CHECK-NEXT:    call void @externally_visible_default()
-; CHECK-NEXT:    [[F32:%.*]] = call float bitcast (i32 ()* @bitcasted_function to float ()*)()
+; CHECK-NEXT:    [[F32:%.*]] = call float @bitcasted_function()
 ; CHECK-NEXT:    ret void
 ;
   call void @merge_cycle_0()
   call void @default_captured_address()
   call void @externally_visible_default()
-  %f32 = call float bitcast (i32 ()* @bitcasted_function to float ()*)()
+  %f32 = call float @bitcasted_function()
   ret void
 }
 
 define internal void @default_captured_address() {
 ; CHECK-LABEL: define {{[^@]+}}@default_captured_address
 ; CHECK-SAME: () #[[ATTR8:[0-9]+]] {
-; CHECK-NEXT:    store volatile void ()* @default_captured_address, void ()** undef, align 8
+; CHECK-NEXT:    store volatile ptr @default_captured_address, ptr undef, align 8
 ; CHECK-NEXT:    ret void
 ;
-  store volatile void ()* @default_captured_address, void ()** undef, align 8
+  store volatile ptr @default_captured_address, ptr undef, align 8
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
index fcaba91cea8f..04ae40800b36 100644
--- a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
@@ -24,19 +24,19 @@ define internal void @indirect() {
 define amdgpu_kernel void @test_simple_indirect_call() {
 ; AKF_GCN-LABEL: define {{[^@]+}}@test_simple_indirect_call
 ; AKF_GCN-SAME: () #[[ATTR0:[0-9]+]] {
-; AKF_GCN-NEXT:    [[FPTR:%.*]] = alloca void ()*, align 8, addrspace(5)
-; AKF_GCN-NEXT:    [[FPTR_CAST:%.*]] = addrspacecast void ()* addrspace(5)* [[FPTR]] to void ()**
-; AKF_GCN-NEXT:    store void ()* @indirect, void ()** [[FPTR_CAST]], align 8
-; AKF_GCN-NEXT:    [[FP:%.*]] = load void ()*, void ()** [[FPTR_CAST]], align 8
+; AKF_GCN-NEXT:    [[FPTR:%.*]] = alloca ptr, align 8, addrspace(5)
+; AKF_GCN-NEXT:    [[FPTR_CAST:%.*]] = addrspacecast ptr addrspace(5) [[FPTR]] to ptr
+; AKF_GCN-NEXT:    store ptr @indirect, ptr [[FPTR_CAST]], align 8
+; AKF_GCN-NEXT:    [[FP:%.*]] = load ptr, ptr [[FPTR_CAST]], align 8
 ; AKF_GCN-NEXT:    call void [[FP]]()
 ; AKF_GCN-NEXT:    ret void
 ;
 ; ATTRIBUTOR_GCN-LABEL: define {{[^@]+}}@test_simple_indirect_call
 ; ATTRIBUTOR_GCN-SAME: () #[[ATTR1:[0-9]+]] {
-; ATTRIBUTOR_GCN-NEXT:    [[FPTR:%.*]] = alloca void ()*, align 8, addrspace(5)
-; ATTRIBUTOR_GCN-NEXT:    [[FPTR_CAST:%.*]] = addrspacecast void ()* addrspace(5)* [[FPTR]] to void ()**
-; ATTRIBUTOR_GCN-NEXT:    store void ()* @indirect, void ()** [[FPTR_CAST]], align 8
-; ATTRIBUTOR_GCN-NEXT:    [[FP:%.*]] = load void ()*, void ()** [[FPTR_CAST]], align 8
+; ATTRIBUTOR_GCN-NEXT:    [[FPTR:%.*]] = alloca ptr, align 8, addrspace(5)
+; ATTRIBUTOR_GCN-NEXT:    [[FPTR_CAST:%.*]] = addrspacecast ptr addrspace(5) [[FPTR]] to ptr
+; ATTRIBUTOR_GCN-NEXT:    store ptr @indirect, ptr [[FPTR_CAST]], align 8
+; ATTRIBUTOR_GCN-NEXT:    [[FP:%.*]] = load ptr, ptr [[FPTR_CAST]], align 8
 ; ATTRIBUTOR_GCN-NEXT:    call void [[FP]]()
 ; ATTRIBUTOR_GCN-NEXT:    ret void
 ;
@@ -62,10 +62,10 @@ define amdgpu_kernel void @test_simple_indirect_call() {
 ; GFX9-NEXT:    ds_write_b64 v0, v[3:4]
 ; GFX9-NEXT:    s_swappc_b64 s[30:31], s[6:7]
 ; GFX9-NEXT:    s_endpgm
-  %fptr = alloca void()*, addrspace(5)
-  %fptr.cast = addrspacecast void()* addrspace(5)* %fptr to void()**
-  store void()* @indirect, void()** %fptr.cast
-  %fp = load void()*, void()** %fptr.cast
+  %fptr = alloca ptr, addrspace(5)
+  %fptr.cast = addrspacecast ptr addrspace(5) %fptr to ptr
+  store ptr @indirect, ptr %fptr.cast
+  %fp = load ptr, ptr %fptr.cast
   call void %fp()
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/spill-csr-frame-ptr-reg-copy.ll b/llvm/test/CodeGen/AMDGPU/spill-csr-frame-ptr-reg-copy.ll
index 6ce2f226a869..601c36f3146a 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-csr-frame-ptr-reg-copy.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-csr-frame-ptr-reg-copy.ll
@@ -25,10 +25,10 @@ define void @spill_csr_s5_copy() #0 {
 bb:
   %alloca = alloca i32, addrspace(5)
   %tmp = tail call i64 @func() #1
-  %tmp1 = getelementptr inbounds i32, i32 addrspace(1)* null, i64 %tmp
-  %tmp2 = load i32, i32 addrspace(1)* %tmp1, align 4
+  %tmp1 = getelementptr inbounds i32, ptr addrspace(1) null, i64 %tmp
+  %tmp2 = load i32, ptr addrspace(1) %tmp1, align 4
   %tmp3 = zext i32 %tmp2 to i64
-  store volatile i32 9, i32 addrspace(5)* %alloca
+  store volatile i32 9, ptr addrspace(5) %alloca
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/trunc-store-i64.ll b/llvm/test/CodeGen/AMDGPU/trunc-store-i64.ll
index 85029a544383..e8452da140b5 100644
--- a/llvm/test/CodeGen/AMDGPU/trunc-store-i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/trunc-store-i64.ll
@@ -2,38 +2,38 @@
 
 ; GCN-LABEL: {{^}}trunc_store_v4i64_v4i8:
 ; GCN: global_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}
-define amdgpu_kernel void @trunc_store_v4i64_v4i8(< 4 x i8> addrspace(1)* %out, <4 x i64> %in) {
+define amdgpu_kernel void @trunc_store_v4i64_v4i8(ptr addrspace(1) %out, <4 x i64> %in) {
 entry:
   %trunc = trunc <4 x i64> %in to < 4 x i8>
-  store <4 x i8> %trunc, <4 x i8> addrspace(1)* %out
+  store <4 x i8> %trunc, ptr addrspace(1) %out
   ret void
 }
 
 ; GCN-LABEL: {{^}}trunc_store_v8i64_v8i8:
 ; GCN: global_store_dwordx2 v{{[0-9]+}}, v{{\[[0-9]:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
-define amdgpu_kernel void @trunc_store_v8i64_v8i8(< 8 x i8> addrspace(1)* %out, <8 x i64> %in) {
+define amdgpu_kernel void @trunc_store_v8i64_v8i8(ptr addrspace(1) %out, <8 x i64> %in) {
 entry:
   %trunc = trunc <8 x i64> %in to < 8 x i8>
-  store <8 x i8> %trunc, <8 x i8> addrspace(1)* %out
+  store <8 x i8> %trunc, ptr addrspace(1) %out
   ret void
 }
 
 ; GCN-LABEL: {{^}}trunc_store_v8i64_v8i16:
 ; GCN: global_store_dwordx4 v{{[0-9]+}}, v{{\[[0-9]:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
-define amdgpu_kernel void @trunc_store_v8i64_v8i16(< 8 x i16> addrspace(1)* %out, <8 x i64> %in) {
+define amdgpu_kernel void @trunc_store_v8i64_v8i16(ptr addrspace(1) %out, <8 x i64> %in) {
 entry:
   %trunc = trunc <8 x i64> %in to < 8 x i16>
-  store <8 x i16> %trunc, <8 x i16> addrspace(1)* %out
+  store <8 x i16> %trunc, ptr addrspace(1) %out
   ret void
 }
 
 ; GCN-LABEL: {{^}}trunc_store_v8i64_v8i32:
 ; GCN: global_store_dwordx4 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} offset:16
 ; GCN: global_store_dwordx4 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]$}}
-define amdgpu_kernel void @trunc_store_v8i64_v8i32(< 8 x i32> addrspace(1)* %out, <8 x i64> %in) {
+define amdgpu_kernel void @trunc_store_v8i64_v8i32(ptr addrspace(1) %out, <8 x i64> %in) {
 entry:
   %trunc = trunc <8 x i64> %in to <8 x i32>
-  store <8 x i32> %trunc, <8 x i32> addrspace(1)* %out
+  store <8 x i32> %trunc, ptr addrspace(1) %out
   ret void
 }
 
@@ -42,9 +42,9 @@ entry:
 ; GCN: global_store_dwordx4 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} offset:32
 ; GCN: global_store_dwordx4 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}} offset:16
 ; GCN: global_store_dwordx4 v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]$}}
-define amdgpu_kernel void @trunc_store_v16i64_v16i32(< 16 x i32> addrspace(1)* %out, <16 x i64> %in) {
+define amdgpu_kernel void @trunc_store_v16i64_v16i32(ptr addrspace(1) %out, <16 x i64> %in) {
 entry:
   %trunc = trunc <16 x i64> %in to <16 x i32>
-  store <16 x i32> %trunc, <16 x i32> addrspace(1)* %out
+  store <16 x i32> %trunc, ptr addrspace(1) %out
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll b/llvm/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll
index 94f9cc8ede9e..542af0e1a224 100644
--- a/llvm/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll
+++ b/llvm/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll
@@ -34,7 +34,7 @@ B30.1:
 B30.2:
   %v3 = phi <4 x float> [ %sub, %B30.1 ], [ %v0, %B2 ]
   %ve0 = extractelement <4 x float> %v3, i32 0
-  store float %ve0, float addrspace(3)* undef, align 4
+  store float %ve0, ptr addrspace(3) undef, align 4
   ret void
 }
 
@@ -50,10 +50,10 @@ define amdgpu_ps float @valley_partially_undef_copy() #0 {
 ; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], 0 glc
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0x7fc00000
-; CHECK-NEXT:    buffer_store_dword v2, off, s[0:3], 0
 ; CHECK-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; CHECK-NEXT:    buffer_store_dword v2, off, s[0:3], 0
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; CHECK-NEXT:    s_waitcnt expcnt(0)
+; CHECK-NEXT:    s_waitcnt expcnt(1)
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
 ; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 1, v1
 ; CHECK-NEXT:  .LBB1_1: ; %bb9
@@ -67,8 +67,8 @@ define amdgpu_ps float @valley_partially_undef_copy() #0 {
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
 ; CHECK-NEXT:    ; return to shader part epilog
 bb:
-  %tmp = load volatile i32, i32 addrspace(1)* undef, align 4
-  %tmp1 = load volatile i32, i32 addrspace(1)* undef, align 4
+  %tmp = load volatile i32, ptr addrspace(1) undef, align 4
+  %tmp1 = load volatile i32, ptr addrspace(1) undef, align 4
   %tmp2 = insertelement <4 x i32> undef, i32 %tmp1, i32 0
   %tmp3 = bitcast i32 %tmp1 to float
   %tmp4 = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %tmp3, float %tmp3, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0)
@@ -76,8 +76,8 @@ bb:
   %tmp6 = fmul float %tmp5, undef
   %tmp7 = fadd float %tmp6, %tmp6
   %tmp8 = insertelement <4 x i32> %tmp2, i32 %tmp, i32 1
-  store <4 x i32> %tmp8, <4 x i32> addrspace(1)* undef, align 16
-  store float %tmp7, float addrspace(1)* undef, align 4
+  store <4 x i32> %tmp8, ptr addrspace(1) undef, align 16
+  store float %tmp7, ptr addrspace(1) undef, align 4
   br label %bb9
 
 bb9:                                              ; preds = %bb9, %bb
@@ -85,7 +85,7 @@ bb9:                                              ; preds = %bb9, %bb
   br i1 %tmp10, label %bb9, label %bb11
 
 bb11:                                             ; preds = %bb9
-  store <4 x i32> %tmp2, <4 x i32> addrspace(1)* undef, align 16
+  store <4 x i32> %tmp2, ptr addrspace(1) undef, align 16
   ret float undef
 }
 
@@ -118,7 +118,7 @@ define amdgpu_kernel void @partially_undef_copy() #0 {
   %partially.undef.0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
   %partially.undef.1 = insertelement <4 x i32> %partially.undef.0, i32 %tmp1, i32 0
 
-  store volatile <4 x i32> %partially.undef.1, <4 x i32> addrspace(1)* undef, align 16
+  store volatile <4 x i32> %partially.undef.1, ptr addrspace(1) undef, align 16
   tail call void asm sideeffect "v_nop", "v={v[5:8]}"(<4 x i32> %partially.undef.0)
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
index ff1b276af74d..284dc5630d62 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
@@ -12,10 +12,10 @@
 define void @foo() #0 {
 ; CHECK-LABEL: define {{[^@]+}}@foo
 ; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:    store i32 0, i32* @x, align 4
+; CHECK-NEXT:    store i32 0, ptr @x, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* @x
+  store i32 0, ptr @x
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
index eeb2c3df025f..7f248e2ec7f6 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
@@ -2,7 +2,7 @@
 ; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-attributor < %s | FileCheck %s
 
 ;.
-; CHECK: @[[G1:[a-zA-Z0-9_$"\\.-]+]] = global i32* null
+; CHECK: @[[G1:[a-zA-Z0-9_$"\\.-]+]] = global ptr null
 ; CHECK: @[[G2:[a-zA-Z0-9_$"\\.-]+]] = global i32 0
 ;.
 define weak void @weak() {
@@ -15,17 +15,17 @@ define weak void @weak() {
   ret void
 }
 
-@G1 = global i32* null
+@G1 = global ptr null
 
 define internal void @internal1() {
 ; CHECK-LABEL: define {{[^@]+}}@internal1
 ; CHECK-SAME: () #[[ATTR0]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32*, i32** @G1, align 8
-; CHECK-NEXT:    store i32 0, i32* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr @G1, align 8
+; CHECK-NEXT:    store i32 0, ptr [[TMP1]], align 4
 ; CHECK-NEXT:    ret void
 ;
-  %1 = load i32*, i32** @G1
-  store i32 0, i32* %1
+  %1 = load ptr, ptr @G1
+  store i32 0, ptr %1
   ret void
 }
 
@@ -44,7 +44,7 @@ define amdgpu_kernel void @kernel1() #0 {
 define internal void @internal3() {
 ; CHECK-LABEL: define {{[^@]+}}@internal3
 ; CHECK-SAME: () #[[ATTR1]] {
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @G2, align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @G2, align 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
 ; CHECK-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
 ; CHECK:       3:
@@ -54,7 +54,7 @@ define internal void @internal3() {
 ; CHECK:       4:
 ; CHECK-NEXT:    ret void
 ;
-  %1 = load i32, i32* @G2, align 4
+  %1 = load i32, ptr @G2, align 4
   %2 = icmp eq i32 %1, 0
   br i1 %2, label %3, label %4
 3:
@@ -68,10 +68,10 @@ define internal void @internal3() {
 define internal void @internal4() {
 ; CHECK-LABEL: define {{[^@]+}}@internal4
 ; CHECK-SAME: () #[[ATTR1]] {
-; CHECK-NEXT:    store i32 1, i32* @G2, align 4
+; CHECK-NEXT:    store i32 1, ptr @G2, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 1, i32* @G2, align 4
+  store i32 1, ptr @G2, align 4
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
index 53b3bde39ba2..27e187684238 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
@@ -12,10 +12,10 @@
 define void @func() #0 {
 ; CHECK-LABEL: define {{[^@]+}}@func
 ; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:    store i32 0, i32* @x, align 4
+; CHECK-NEXT:    store i32 0, ptr @x, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* @x
+  store i32 0, ptr @x
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
index c28aa55e346f..be5be1b28d27 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
@@ -80,20 +80,20 @@ exit:
   ret i32 1
 }
 
-define amdgpu_kernel void @kernel(i32 addrspace(1)* %m) #1 {
+define amdgpu_kernel void @kernel(ptr addrspace(1) %m) #1 {
 ; CHECK-LABEL: define {{[^@]+}}@kernel
-; CHECK-SAME: (i32 addrspace(1)* [[M:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-SAME: (ptr addrspace(1) [[M:%.*]]) #[[ATTR2:[0-9]+]] {
 ; CHECK-NEXT:    [[R:%.*]] = call i32 @fib(i32 5)
 ; CHECK-NEXT:    [[R2:%.*]] = call i32 @fib_internal(i32 5)
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[M]], align 4
-; CHECK-NEXT:    store i32 [[R2]], i32 addrspace(1)* [[M]], align 4
+; CHECK-NEXT:    store i32 [[R]], ptr addrspace(1) [[M]], align 4
+; CHECK-NEXT:    store i32 [[R2]], ptr addrspace(1) [[M]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %r = call i32 @fib(i32 5)
   %r2 = call i32 @fib_internal(i32 5)
 
-  store i32 %r, i32 addrspace(1)* %m
-  store i32 %r2, i32 addrspace(1)* %m
+  store i32 %r, ptr addrspace(1) %m
+  store i32 %r2, ptr addrspace(1) %m
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
index 423627e582d2..f67b10f111a5 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
@@ -8,20 +8,20 @@
 define void @func1() {
 ; CHECK-LABEL: define {{[^@]+}}@func1
 ; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT:    store i32 0, i32* @x, align 4
+; CHECK-NEXT:    store i32 0, ptr @x, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* @x
+  store i32 0, ptr @x
   ret void
 }
 
 define void @func4() {
 ; CHECK-LABEL: define {{[^@]+}}@func4
 ; CHECK-SAME: () #[[ATTR0]] {
-; CHECK-NEXT:    store i32 0, i32* @x, align 4
+; CHECK-NEXT:    store i32 0, ptr @x, align 4
 ; CHECK-NEXT:    ret void
 ;
-  store i32 0, i32* @x
+  store i32 0, ptr @x
   ret void
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/vector-alloca-addrspacecast.ll b/llvm/test/CodeGen/AMDGPU/vector-alloca-addrspacecast.ll
index d11e348682c8..22f0c2810672 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-alloca-addrspacecast.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-alloca-addrspacecast.ll
@@ -4,24 +4,23 @@
 
 ; OPT-LABEL: @vector_addrspacecast(
 ; OPT: alloca [3 x i32]
-; OPT: store i32 0, i32 addrspace(5)* %a0, align 4
-; OPT: store i32 1, i32 addrspace(5)* %a1, align 4
-; OPT: store i32 2, i32 addrspace(5)* %a2, align 4
-; OPT: %tmp = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i64 0, i64 %index
-; OPT: %ac = addrspacecast i32 addrspace(5)* %tmp to i32*
-; OPT: %data = load i32, i32* %ac, align 4
-define amdgpu_kernel void @vector_addrspacecast(i32 addrspace(1)* %out, i64 %index) {
+; OPT: store i32 0, ptr addrspace(5) %alloca, align 4
+; OPT: store i32 1, ptr addrspace(5) %a1, align 4
+; OPT: store i32 2, ptr addrspace(5) %a2, align 4
+; OPT: %tmp = getelementptr [3 x i32], ptr addrspace(5) %alloca, i64 0, i64 %index
+; OPT: %ac = addrspacecast ptr addrspace(5) %tmp to ptr
+; OPT: %data = load i32, ptr %ac, align 4
+define amdgpu_kernel void @vector_addrspacecast(ptr addrspace(1) %out, i64 %index) {
 entry:
   %alloca = alloca [3 x i32], addrspace(5)
-  %a0 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 0
-  %a1 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 1
-  %a2 = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i32 0, i32 2
-  store i32 0, i32 addrspace(5)* %a0
-  store i32 1, i32 addrspace(5)* %a1
-  store i32 2, i32 addrspace(5)* %a2
-  %tmp = getelementptr [3 x i32], [3 x i32] addrspace(5)* %alloca, i64 0, i64 %index
-  %ac = addrspacecast i32 addrspace(5)* %tmp to i32 *
-  %data = load i32, i32 * %ac
-  store i32 %data, i32 addrspace(1)* %out
+  %a1 = getelementptr [3 x i32], ptr addrspace(5) %alloca, i32 0, i32 1
+  %a2 = getelementptr [3 x i32], ptr addrspace(5) %alloca, i32 0, i32 2
+  store i32 0, ptr addrspace(5) %alloca
+  store i32 1, ptr addrspace(5) %a1
+  store i32 2, ptr addrspace(5) %a2
+  %tmp = getelementptr [3 x i32], ptr addrspace(5) %alloca, i64 0, i64 %index
+  %ac = addrspacecast ptr addrspace(5) %tmp to ptr
+  %data = load i32, ptr %ac
+  store i32 %data, ptr addrspace(1) %out
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/vectorize-buffer-fat-pointer.ll b/llvm/test/CodeGen/AMDGPU/vectorize-buffer-fat-pointer.ll
index 9ade55001c7c..c109d38b9cb2 100644
--- a/llvm/test/CodeGen/AMDGPU/vectorize-buffer-fat-pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/vectorize-buffer-fat-pointer.ll
@@ -1,17 +1,16 @@
 ; RUN: opt -S -mtriple=amdgcn-- -passes=load-store-vectorizer < %s | FileCheck -check-prefix=OPT %s
 
 ; OPT-LABEL: @func(
-define void @func(i32 addrspace(7)* %out) {
+define void @func(ptr addrspace(7) %out) {
 entry:
-  %a0 = getelementptr i32, i32 addrspace(7)* %out, i32 0
-  %a1 = getelementptr i32, i32 addrspace(7)* %out, i32 1
-  %a2 = getelementptr i32, i32 addrspace(7)* %out, i32 2
-  %a3 = getelementptr i32, i32 addrspace(7)* %out, i32 3
+  %a1 = getelementptr i32, ptr addrspace(7) %out, i32 1
+  %a2 = getelementptr i32, ptr addrspace(7) %out, i32 2
+  %a3 = getelementptr i32, ptr addrspace(7) %out, i32 3
 
-; OPT: store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> addrspace(7)* %0, align 4
-  store i32 0, i32 addrspace(7)* %a0
-  store i32 1, i32 addrspace(7)* %a1
-  store i32 2, i32 addrspace(7)* %a2
-  store i32 3, i32 addrspace(7)* %a3
+; OPT: store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr addrspace(7) %out, align 4
+  store i32 0, ptr addrspace(7) %out
+  store i32 1, ptr addrspace(7) %a1
+  store i32 2, ptr addrspace(7) %a2
+  store i32 3, ptr addrspace(7) %a3
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll b/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll
index 852b980f0f20..a97c436d303b 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll
@@ -382,7 +382,7 @@ define <4 x float> @call_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp,
 
 main_body:
   %v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.2d.v4f32.f32.f32(i32 1, float %bias, float %zcompare, float %s, float %t, float %clamp, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
-  store <4 x float> %v, <4 x float> addrspace(1)* undef
+  store <4 x float> %v, ptr addrspace(1) undef
   call void @extern_func()
   %v1 = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.2d.v4f32.f32.f32(i32 1, float %bias, float %zcompare, float %s, float %t, float %clamp, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
   ret <4 x float> %v1


        

