[llvm] a982f09 - InferAddressSpaces: Convert tests to opaque pointers

Matt Arsenault via llvm-commits llvm-commits@lists.llvm.org
Sun Nov 27 17:26:21 PST 2022


Author: Matt Arsenault
Date: 2022-11-27T20:26:16-05:00
New Revision: a982f095677cf1834d981e92efdddd0b36accd67

URL: https://github.com/llvm/llvm-project/commit/a982f095677cf1834d981e92efdddd0b36accd67
DIFF: https://github.com/llvm/llvm-project/commit/a982f095677cf1834d981e92efdddd0b36accd67.diff

LOG: InferAddressSpaces: Convert tests to opaque pointers

Constantexprs were mangled by the opaquify script, so those lines had to be
updated manually (see the example after the list):
  NVPTX/bug31948.ll
  AMDGPU/old-pass-regressions.ll
  AMDGPU/old-pass-regressions-inseltpoison.ll
  AMDGPU/infer-address-space.ll
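
For example, in AMDGPU/infer-address-space.ll the constantexpr GEPs keep
their source element type and only the pointer types become opaque (taken
from the diff below):

  ; typed pointers
  %tmp3 = load float, float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5), align 4
  ; opaque pointers
  %tmp3 = load float, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i32 0, i32 5), align 4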

Required re-running update_test_checks:
  AMDGPU/redundant-addrspacecast.ll

In AMDGPU/insert-pos-assert.ll, the conversion deleted a zero-offset
getelementptr from bitcast_insert_pos_assert_2 which was presumably relevant.
Replaced it with an offset 1 GEP to ensure another addrspacecast is still
inserted (sketched below).
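
A minimal sketch of the intended pattern (hypothetical IR, not the actual
test contents): a non-zero offset keeps the GEP as a distinct pointer value,
so InferAddressSpaces still has to insert an addrspacecast back to flat for
the unrewritable user.

  define void @sketch(ptr addrspace(1) %p) {
    %asc = addrspacecast ptr addrspace(1) %p to ptr
    ; offset 1, so the GEP is not folded away as an identity
    %gep = getelementptr i8, ptr %asc, i64 1
    ; %gep is rewritten into addrspace(1); a new addrspacecast to flat is
    ; inserted here for the call argument
    call void @use_flat(ptr %gep)
    ret void
  }
  declare void @use_flat(ptr)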

AMDGPU/infer-getelementptr.ll had one case improve by introducing an
inbounds flag.

Added: 
    

Modified: 
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/assumed-addrspace.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/debug-info.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-addrspacecast.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/intrinsics.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/issue53665.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/no-flat-addrspace.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/noop-ptrint-pair.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions-inseltpoison.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/redundant-addrspacecast.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/select.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/self-phi.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/unreachable-code-assert.ll
    llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
    llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll
    llvm/test/Transforms/InferAddressSpaces/NVPTX/builtin-assumed-addrspace.ll
    llvm/test/Transforms/InferAddressSpaces/NVPTX/clone_constexpr.ll
    llvm/test/Transforms/InferAddressSpaces/X86/noop-ptrint-pair.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll
index 63cabb97c237f..43501a2ae8241 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll
@@ -1,55 +1,55 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces -instsimplify %s | FileCheck %s
 
-define amdgpu_kernel void @is_local_true(i8 addrspace(3)* %lptr) {
+define amdgpu_kernel void @is_local_true(ptr addrspace(3) %lptr) {
 ; CHECK-LABEL: @is_local_true(
-; CHECK-NEXT:    store i32 1, i32 addrspace(1)* undef
+; CHECK-NEXT:    store i32 1, ptr addrspace(1) undef
 ; CHECK-NEXT:    ret void
 ;
-  %cast = addrspacecast i8 addrspace(3)* %lptr to i8*
-  %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %cast)
+  %cast = addrspacecast ptr addrspace(3) %lptr to ptr
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %cast)
   %ext = zext i1 %is.shared to i32
-  store i32 %ext, i32 addrspace(1)* undef
+  store i32 %ext, ptr addrspace(1) undef
   ret void
 }
 
-define amdgpu_kernel void @is_local_false(i8 addrspace(1)* %gptr) {
+define amdgpu_kernel void @is_local_false(ptr addrspace(1) %gptr) {
 ; CHECK-LABEL: @is_local_false(
-; CHECK-NEXT:    store i32 0, i32 addrspace(1)* undef
+; CHECK-NEXT:    store i32 0, ptr addrspace(1) undef
 ; CHECK-NEXT:    ret void
 ;
-  %cast = addrspacecast i8 addrspace(1)* %gptr to i8*
-  %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %cast)
+  %cast = addrspacecast ptr addrspace(1) %gptr to ptr
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %cast)
   %ext = zext i1 %is.shared to i32
-  store i32 %ext, i32 addrspace(1)* undef
+  store i32 %ext, ptr addrspace(1) undef
   ret void
 }
 
-define void @is_private_true(i8 addrspace(5)* %lptr) {
+define void @is_private_true(ptr addrspace(5) %lptr) {
 ; CHECK-LABEL: @is_private_true(
-; CHECK-NEXT:    store i32 1, i32 addrspace(1)* undef
+; CHECK-NEXT:    store i32 1, ptr addrspace(1) undef
 ; CHECK-NEXT:    ret void
 ;
-  %cast = addrspacecast i8 addrspace(5)* %lptr to i8*
-  %is.private = call i1 @llvm.amdgcn.is.private(i8* %cast)
+  %cast = addrspacecast ptr addrspace(5) %lptr to ptr
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %cast)
   %ext = zext i1 %is.private to i32
-  store i32 %ext, i32 addrspace(1)* undef
+  store i32 %ext, ptr addrspace(1) undef
   ret void
 }
 
-define void @is_private_false(i8 addrspace(1)* %gptr) {
+define void @is_private_false(ptr addrspace(1) %gptr) {
 ; CHECK-LABEL: @is_private_false(
-; CHECK-NEXT:    store i32 0, i32 addrspace(1)* undef
+; CHECK-NEXT:    store i32 0, ptr addrspace(1) undef
 ; CHECK-NEXT:    ret void
 ;
-  %cast = addrspacecast i8 addrspace(1)* %gptr to i8*
-  %is.private = call i1 @llvm.amdgcn.is.private(i8* %cast)
+  %cast = addrspacecast ptr addrspace(1) %gptr to ptr
+  %is.private = call i1 @llvm.amdgcn.is.private(ptr %cast)
   %ext = zext i1 %is.private to i32
-  store i32 %ext, i32 addrspace(1)* undef
+  store i32 %ext, ptr addrspace(1) undef
   ret void
 }
 
-declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #0
-declare i1 @llvm.amdgcn.is.private(i8* nocapture) #0
+declare i1 @llvm.amdgcn.is.shared(ptr nocapture) #0
+declare i1 @llvm.amdgcn.is.private(ptr nocapture) #0
 
 attributes #0 = { nounwind readnone speculatable }

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/assumed-addrspace.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/assumed-addrspace.ll
index 8ce9ecf4281e1..96f10498576fd 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/assumed-addrspace.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/assumed-addrspace.ll
@@ -1,31 +1,30 @@
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces -o - %s | FileCheck %s
 
-@c0 = addrspace(4) global float* undef
+@c0 = addrspace(4) global ptr undef
 
 ; CHECK-LABEL: @generic_ptr_from_constant
-; CHECK: addrspacecast float* %p to float addrspace(1)*
-; CHECK-NEXT: load float, float addrspace(1)*
+; CHECK: addrspacecast ptr %p to ptr addrspace(1)
+; CHECK-NEXT: load float, ptr addrspace(1)
 define float @generic_ptr_from_constant() {
-  %p = load float*, float* addrspace(4)* @c0
-  %v = load float, float* %p
+  %p = load ptr, ptr addrspace(4) @c0
+  %v = load float, ptr %p
   ret float %v
 }
 
-%struct.S = type { i32*, float* }
+%struct.S = type { ptr, ptr }
 
 ; CHECK-LABEL: @generic_ptr_from_aggregate_argument
-; CHECK: addrspacecast i32* %p0 to i32 addrspace(1)*
-; CHECK: addrspacecast float* %p1 to float addrspace(1)*
-; CHECK: load i32, i32 addrspace(1)*
-; CHECK: store float %v1, float addrspace(1)*
+; CHECK: addrspacecast ptr %p0 to ptr addrspace(1)
+; CHECK: addrspacecast ptr %p1 to ptr addrspace(1)
+; CHECK: load i32, ptr addrspace(1)
+; CHECK: store float %v1, ptr addrspace(1)
 ; CHECK: ret
-define amdgpu_kernel void @generic_ptr_from_aggregate_argument(%struct.S addrspace(4)* byref(%struct.S) align 8 %0) {
-  %f0 = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 0
-  %p0 = load i32*, i32* addrspace(4)* %f0
-  %f1 = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 1
-  %p1 = load float*, float* addrspace(4)* %f1
-  %v0 = load i32, i32* %p0
+define amdgpu_kernel void @generic_ptr_from_aggregate_argument(ptr addrspace(4) byref(%struct.S) align 8 %0) {
+  %p0 = load ptr, ptr addrspace(4) %0
+  %f1 = getelementptr inbounds %struct.S, ptr addrspace(4) %0, i64 0, i32 1
+  %p1 = load ptr, ptr addrspace(4) %f1
+  %v0 = load i32, ptr %p0
   %v1 = sitofp i32 %v0 to float
-  store float %v1, float* %p1
+  store float %v1, ptr %p1
   ret void
 }

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
index e0a0043d76b7b..53230a42e5d19 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/basic.ll
@@ -3,182 +3,182 @@
 ; Trivial optimization of generic addressing
 
 ; CHECK-LABEL: @load_global_from_flat(
-; CHECK-NEXT: %tmp0 = addrspacecast float* %generic_scalar to float addrspace(1)*
-; CHECK-NEXT: %tmp1 = load float, float addrspace(1)* %tmp0
+; CHECK-NEXT: %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(1)
+; CHECK-NEXT: %tmp1 = load float, ptr addrspace(1) %tmp0
 ; CHECK-NEXT: ret float %tmp1
-define float @load_global_from_flat(float* %generic_scalar) #0 {
-  %tmp0 = addrspacecast float* %generic_scalar to float addrspace(1)*
-  %tmp1 = load float, float addrspace(1)* %tmp0
+define float @load_global_from_flat(ptr %generic_scalar) #0 {
+  %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(1)
+  %tmp1 = load float, ptr addrspace(1) %tmp0
   ret float %tmp1
 }
 
 ; CHECK-LABEL: @load_constant_from_flat(
-; CHECK-NEXT: %tmp0 = addrspacecast float* %generic_scalar to float addrspace(4)*
-; CHECK-NEXT: %tmp1 = load float, float addrspace(4)* %tmp0
+; CHECK-NEXT: %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(4)
+; CHECK-NEXT: %tmp1 = load float, ptr addrspace(4) %tmp0
 ; CHECK-NEXT: ret float %tmp1
-define float @load_constant_from_flat(float* %generic_scalar) #0 {
-  %tmp0 = addrspacecast float* %generic_scalar to float addrspace(4)*
-  %tmp1 = load float, float addrspace(4)* %tmp0
+define float @load_constant_from_flat(ptr %generic_scalar) #0 {
+  %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(4)
+  %tmp1 = load float, ptr addrspace(4) %tmp0
   ret float %tmp1
 }
 
 ; CHECK-LABEL: @load_group_from_flat(
-; CHECK-NEXT: %tmp0 = addrspacecast float* %generic_scalar to float addrspace(3)*
-; CHECK-NEXT: %tmp1 = load float, float addrspace(3)* %tmp0
+; CHECK-NEXT: %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(3)
+; CHECK-NEXT: %tmp1 = load float, ptr addrspace(3) %tmp0
 ; CHECK-NEXT: ret float %tmp1
-define float @load_group_from_flat(float* %generic_scalar) #0 {
-  %tmp0 = addrspacecast float* %generic_scalar to float addrspace(3)*
-  %tmp1 = load float, float addrspace(3)* %tmp0
+define float @load_group_from_flat(ptr %generic_scalar) #0 {
+  %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(3)
+  %tmp1 = load float, ptr addrspace(3) %tmp0
   ret float %tmp1
 }
 
 ; CHECK-LABEL: @load_private_from_flat(
-; CHECK-NEXT: %tmp0 = addrspacecast float* %generic_scalar to float addrspace(5)*
-; CHECK-NEXT: %tmp1 = load float, float addrspace(5)* %tmp0
+; CHECK-NEXT: %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(5)
+; CHECK-NEXT: %tmp1 = load float, ptr addrspace(5) %tmp0
 ; CHECK-NEXT: ret float %tmp1
-define float @load_private_from_flat(float* %generic_scalar) #0 {
-  %tmp0 = addrspacecast float* %generic_scalar to float addrspace(5)*
-  %tmp1 = load float, float addrspace(5)* %tmp0
+define float @load_private_from_flat(ptr %generic_scalar) #0 {
+  %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(5)
+  %tmp1 = load float, ptr addrspace(5) %tmp0
   ret float %tmp1
 }
 
 ; CHECK-LABEL: @store_global_from_flat(
-; CHECK-NEXT: %tmp0 = addrspacecast float* %generic_scalar to float addrspace(1)*
-; CHECK-NEXT: store float 0.000000e+00, float addrspace(1)* %tmp0
-define amdgpu_kernel void @store_global_from_flat(float* %generic_scalar) #0 {
-  %tmp0 = addrspacecast float* %generic_scalar to float addrspace(1)*
-  store float 0.0, float addrspace(1)* %tmp0
+; CHECK-NEXT: %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(1)
+; CHECK-NEXT: store float 0.000000e+00, ptr addrspace(1) %tmp0
+define amdgpu_kernel void @store_global_from_flat(ptr %generic_scalar) #0 {
+  %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(1)
+  store float 0.0, ptr addrspace(1) %tmp0
   ret void
 }
 
 ; CHECK-LABEL: @store_group_from_flat(
-; CHECK-NEXT: %tmp0 = addrspacecast float* %generic_scalar to float addrspace(3)*
-; CHECK-NEXT: store float 0.000000e+00, float addrspace(3)* %tmp0
-define amdgpu_kernel void @store_group_from_flat(float* %generic_scalar) #0 {
-  %tmp0 = addrspacecast float* %generic_scalar to float addrspace(3)*
-  store float 0.0, float addrspace(3)* %tmp0
+; CHECK-NEXT: %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(3)
+; CHECK-NEXT: store float 0.000000e+00, ptr addrspace(3) %tmp0
+define amdgpu_kernel void @store_group_from_flat(ptr %generic_scalar) #0 {
+  %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(3)
+  store float 0.0, ptr addrspace(3) %tmp0
   ret void
 }
 
 ; CHECK-LABEL: @store_private_from_flat(
-; CHECK-NEXT: %tmp0 = addrspacecast float* %generic_scalar to float addrspace(5)*
-; CHECK-NEXT: store float 0.000000e+00, float addrspace(5)* %tmp0
-define amdgpu_kernel void @store_private_from_flat(float* %generic_scalar) #0 {
-  %tmp0 = addrspacecast float* %generic_scalar to float addrspace(5)*
-  store float 0.0, float addrspace(5)* %tmp0
+; CHECK-NEXT: %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(5)
+; CHECK-NEXT: store float 0.000000e+00, ptr addrspace(5) %tmp0
+define amdgpu_kernel void @store_private_from_flat(ptr %generic_scalar) #0 {
+  %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(5)
+  store float 0.0, ptr addrspace(5) %tmp0
   ret void
 }
 
 ; optimized to global load/store.
 ; CHECK-LABEL: @load_store_global(
-; CHECK-NEXT: %val = load i32, i32 addrspace(1)* %input, align 4
-; CHECK-NEXT: store i32 %val, i32 addrspace(1)* %output, align 4
+; CHECK-NEXT: %val = load i32, ptr addrspace(1) %input, align 4
+; CHECK-NEXT: store i32 %val, ptr addrspace(1) %output, align 4
 ; CHECK-NEXT: ret void
-define amdgpu_kernel void @load_store_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(1)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(1)* %output to i32*
-  %val = load i32, i32* %tmp0, align 4
-  store i32 %val, i32* %tmp1, align 4
+define amdgpu_kernel void @load_store_global(ptr addrspace(1) nocapture %input, ptr addrspace(1) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(1) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(1) %output to ptr
+  %val = load i32, ptr %tmp0, align 4
+  store i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; Optimized to group load/store.
 ; CHECK-LABEL: @load_store_group(
-; CHECK-NEXT: %val = load i32, i32 addrspace(3)* %input, align 4
-; CHECK-NEXT: store i32 %val, i32 addrspace(3)* %output, align 4
+; CHECK-NEXT: %val = load i32, ptr addrspace(3) %input, align 4
+; CHECK-NEXT: store i32 %val, ptr addrspace(3) %output, align 4
 ; CHECK-NEXT: ret void
-define amdgpu_kernel void @load_store_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(3)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(3)* %output to i32*
-  %val = load i32, i32* %tmp0, align 4
-  store i32 %val, i32* %tmp1, align 4
+define amdgpu_kernel void @load_store_group(ptr addrspace(3) nocapture %input, ptr addrspace(3) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(3) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(3) %output to ptr
+  %val = load i32, ptr %tmp0, align 4
+  store i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; Optimized to private load/store.
 ; CHECK-LABEL: @load_store_private(
-; CHECK-NEXT: %val = load i32, i32 addrspace(5)* %input, align 4
-; CHECK-NEXT: store i32 %val, i32 addrspace(5)* %output, align 4
+; CHECK-NEXT: %val = load i32, ptr addrspace(5) %input, align 4
+; CHECK-NEXT: store i32 %val, ptr addrspace(5) %output, align 4
 ; CHECK-NEXT: ret void
-define amdgpu_kernel void @load_store_private(i32 addrspace(5)* nocapture %input, i32 addrspace(5)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(5)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(5)* %output to i32*
-  %val = load i32, i32* %tmp0, align 4
-  store i32 %val, i32* %tmp1, align 4
+define amdgpu_kernel void @load_store_private(ptr addrspace(5) nocapture %input, ptr addrspace(5) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(5) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(5) %output to ptr
+  %val = load i32, ptr %tmp0, align 4
+  store i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; No optimization. flat load/store.
 ; CHECK-LABEL: @load_store_flat(
-; CHECK-NEXT: %val = load i32, i32* %input, align 4
-; CHECK-NEXT: store i32 %val, i32* %output, align 4
+; CHECK-NEXT: %val = load i32, ptr %input, align 4
+; CHECK-NEXT: store i32 %val, ptr %output, align 4
 ; CHECK-NEXT: ret void
-define amdgpu_kernel void @load_store_flat(i32* nocapture %input, i32* nocapture %output) #0 {
-  %val = load i32, i32* %input, align 4
-  store i32 %val, i32* %output, align 4
+define amdgpu_kernel void @load_store_flat(ptr nocapture %input, ptr nocapture %output) #0 {
+  %val = load i32, ptr %input, align 4
+  store i32 %val, ptr %output, align 4
   ret void
 }
 
 ; CHECK-LABEL: @store_addrspacecast_ptr_value(
-; CHECK: %cast = addrspacecast i32 addrspace(1)* %input to i32*
-; CHECK-NEXT: store i32* %cast, i32* addrspace(1)* %output, align 4
-define amdgpu_kernel void @store_addrspacecast_ptr_value(i32 addrspace(1)* nocapture %input, i32* addrspace(1)* nocapture %output) #0 {
-  %cast = addrspacecast i32 addrspace(1)* %input to i32*
-  store i32* %cast, i32* addrspace(1)* %output, align 4
+; CHECK: %cast = addrspacecast ptr addrspace(1) %input to ptr
+; CHECK-NEXT: store ptr %cast, ptr addrspace(1) %output, align 4
+define amdgpu_kernel void @store_addrspacecast_ptr_value(ptr addrspace(1) nocapture %input, ptr addrspace(1) nocapture %output) #0 {
+  %cast = addrspacecast ptr addrspace(1) %input to ptr
+  store ptr %cast, ptr addrspace(1) %output, align 4
   ret void
 }
 
 ; CHECK-LABEL: @atomicrmw_add_global_to_flat(
-; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(1)* %global.ptr, i32 %y seq_cst
-define i32 @atomicrmw_add_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
-  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32*
-  %ret = atomicrmw add i32* %cast, i32 %y seq_cst
+; CHECK-NEXT: %ret = atomicrmw add ptr addrspace(1) %global.ptr, i32 %y seq_cst
+define i32 @atomicrmw_add_global_to_flat(ptr addrspace(1) %global.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = atomicrmw add ptr %cast, i32 %y seq_cst
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @atomicrmw_add_group_to_flat(
-; CHECK-NEXT: %ret = atomicrmw add i32 addrspace(3)* %group.ptr, i32 %y seq_cst
-define i32 @atomicrmw_add_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
-  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32*
-  %ret = atomicrmw add i32* %cast, i32 %y seq_cst
+; CHECK-NEXT: %ret = atomicrmw add ptr addrspace(3) %group.ptr, i32 %y seq_cst
+define i32 @atomicrmw_add_group_to_flat(ptr addrspace(3) %group.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = atomicrmw add ptr %cast, i32 %y seq_cst
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @cmpxchg_global_to_flat(
-; CHECK: %ret = cmpxchg i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val seq_cst monotonic
-define { i32, i1 } @cmpxchg_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val) #0 {
-  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32*
-  %ret = cmpxchg i32* %cast, i32 %cmp, i32 %val seq_cst monotonic
+; CHECK: %ret = cmpxchg ptr addrspace(1) %global.ptr, i32 %cmp, i32 %val seq_cst monotonic
+define { i32, i1 } @cmpxchg_global_to_flat(ptr addrspace(1) %global.ptr, i32 %cmp, i32 %val) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = cmpxchg ptr %cast, i32 %cmp, i32 %val seq_cst monotonic
   ret { i32, i1 } %ret
 }
 
 ; CHECK-LABEL: @cmpxchg_group_to_flat(
-; CHECK: %ret = cmpxchg i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val seq_cst monotonic
-define { i32, i1 } @cmpxchg_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val) #0 {
-  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32*
-  %ret = cmpxchg i32* %cast, i32 %cmp, i32 %val seq_cst monotonic
+; CHECK: %ret = cmpxchg ptr addrspace(3) %group.ptr, i32 %cmp, i32 %val seq_cst monotonic
+define { i32, i1 } @cmpxchg_group_to_flat(ptr addrspace(3) %group.ptr, i32 %cmp, i32 %val) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = cmpxchg ptr %cast, i32 %cmp, i32 %val seq_cst monotonic
   ret { i32, i1 } %ret
 }
 
 ; Not pointer operand
 ; CHECK-LABEL: @cmpxchg_group_to_flat_wrong_operand(
-; CHECK: %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32*
-; CHECK: %ret = cmpxchg i32* addrspace(3)* %cas.ptr, i32* %cast.cmp, i32* %val seq_cst monotonic
-define { i32*, i1 } @cmpxchg_group_to_flat_wrong_operand(i32* addrspace(3)* %cas.ptr, i32 addrspace(3)* %cmp.ptr, i32* %val) #0 {
-  %cast.cmp = addrspacecast i32 addrspace(3)* %cmp.ptr to i32*
-  %ret = cmpxchg i32* addrspace(3)* %cas.ptr, i32* %cast.cmp, i32* %val seq_cst monotonic
-  ret { i32*, i1 } %ret
+; CHECK: %cast.cmp = addrspacecast ptr addrspace(3) %cmp.ptr to ptr
+; CHECK: %ret = cmpxchg ptr addrspace(3) %cas.ptr, ptr %cast.cmp, ptr %val seq_cst monotonic
+define { ptr, i1 } @cmpxchg_group_to_flat_wrong_operand(ptr addrspace(3) %cas.ptr, ptr addrspace(3) %cmp.ptr, ptr %val) #0 {
+  %cast.cmp = addrspacecast ptr addrspace(3) %cmp.ptr to ptr
+  %ret = cmpxchg ptr addrspace(3) %cas.ptr, ptr %cast.cmp, ptr %val seq_cst monotonic
+  ret { ptr, i1 } %ret
 }
 
 ; Null pointer in local addr space
 ; CHECK-LABEL: @local_nullptr
-; CHECK: icmp ne i8 addrspace(3)* %a, addrspacecast (i8 addrspace(5)* null to i8 addrspace(3)*)
-; CHECK-NOT: i8 addrspace(3)* null
-define void @local_nullptr(i32 addrspace(1)* nocapture %results, i8 addrspace(3)* %a) {
+; CHECK: icmp ne ptr addrspace(3) %a, addrspacecast (ptr addrspace(5) null to ptr addrspace(3))
+; CHECK-NOT: ptr addrspace(3) null
+define void @local_nullptr(ptr addrspace(1) nocapture %results, ptr addrspace(3) %a) {
 entry:
-  %tobool = icmp ne i8 addrspace(3)* %a, addrspacecast (i8 addrspace(5)* null to i8 addrspace(3)*)
+  %tobool = icmp ne ptr addrspace(3) %a, addrspacecast (ptr addrspace(5) null to ptr addrspace(3))
   %conv = zext i1 %tobool to i32
-  store i32 %conv, i32 addrspace(1)* %results, align 4
+  store i32 %conv, ptr addrspace(1) %results, align 4
   ret void
 }
 

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
index 33aa92e6b305e..de36f68c40bf1 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/builtin-assumed-addrspace.ll
@@ -1,93 +1,89 @@
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces -o - %s | FileCheck %s
 
 ; CHECK-LABEL: @f0
-; CHECK: addrspacecast float* {{%.*}} to float addrspace(3)*
-; CHECK: getelementptr inbounds float, float addrspace(3)*
-; CHECK: load float, float addrspace(3)*
-define float @f0(float* %p) {
+; CHECK: addrspacecast ptr {{%.*}} to ptr addrspace(3)
+; CHECK: getelementptr inbounds float, ptr addrspace(3)
+; CHECK: load float, ptr addrspace(3)
+define float @f0(ptr %p) {
 entry:
-  %0 = bitcast float* %p to i8*
-  %1 = call i1 @llvm.amdgcn.is.shared(i8* %0)
-  tail call void @llvm.assume(i1 %1)
-  %2 = tail call i32 @llvm.amdgcn.workitem.id.x()
-  %idxprom = zext i32 %2 to i64
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
-  %3 = load float, float* %arrayidx, align 4
-  ret float %3
+  %0 = call i1 @llvm.amdgcn.is.shared(ptr %p)
+  tail call void @llvm.assume(i1 %0)
+  %1 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %1 to i64
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %2 = load float, ptr %arrayidx, align 4
+  ret float %2
 }
 
 ; CHECK-LABEL: @f1
-; CHECK: addrspacecast float* {{%.*}} to float addrspace(5)*
-; CHECK: getelementptr inbounds float, float addrspace(5)*
-; CHECK: load float, float addrspace(5)*
-define float @f1(float* %p) {
+; CHECK: addrspacecast ptr {{%.*}} to ptr addrspace(5)
+; CHECK: getelementptr inbounds float, ptr addrspace(5)
+; CHECK: load float, ptr addrspace(5)
+define float @f1(ptr %p) {
 entry:
-  %0 = bitcast float* %p to i8*
-  %1 = call i1 @llvm.amdgcn.is.private(i8* %0)
-  tail call void @llvm.assume(i1 %1)
-  %2 = tail call i32 @llvm.amdgcn.workitem.id.x()
-  %idxprom = zext i32 %2 to i64
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
-  %3 = load float, float* %arrayidx, align 4
-  ret float %3
+  %0 = call i1 @llvm.amdgcn.is.private(ptr %p)
+  tail call void @llvm.assume(i1 %0)
+  %1 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %1 to i64
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %2 = load float, ptr %arrayidx, align 4
+  ret float %2
 }
 
 ; CHECK-LABEL: @f2
-; CHECK: addrspacecast float* {{%.*}} to float addrspace(1)*
-; CHECK: getelementptr inbounds float, float addrspace(1)*
-; CHECK: load float, float addrspace(1)*
-define float @f2(float* %p) {
+; CHECK: addrspacecast ptr {{%.*}} to ptr addrspace(1)
+; CHECK: getelementptr inbounds float, ptr addrspace(1)
+; CHECK: load float, ptr addrspace(1)
+define float @f2(ptr %p) {
 entry:
-  %0 = bitcast float* %p to i8*
-  %1 = call i1 @llvm.amdgcn.is.private(i8* %0)
-  %2 = xor i1 %1, -1
-  %3 = call i1 @llvm.amdgcn.is.shared(i8* %0)
-  %4 = xor i1 %3, -1
-  %5 = and i1 %2, %4
-  tail call void @llvm.assume(i1 %5)
-  %6 = tail call i32 @llvm.amdgcn.workitem.id.x()
-  %idxprom = zext i32 %6 to i64
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
-  %7 = load float, float* %arrayidx, align 4
-  ret float %7
+  %0 = call i1 @llvm.amdgcn.is.private(ptr %p)
+  %1 = xor i1 %0, -1
+  %2 = call i1 @llvm.amdgcn.is.shared(ptr %p)
+  %3 = xor i1 %2, -1
+  %4 = and i1 %1, %3
+  tail call void @llvm.assume(i1 %4)
+  %5 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %5 to i64
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %6 = load float, ptr %arrayidx, align 4
+  ret float %6
 }
 
 ; CHECK-LABEL: @g0
 ; CHECK: if.then:
-; CHECK: addrspacecast float* {{%.*}} to float addrspace(3)*
-; CHECK: getelementptr inbounds float, float addrspace(3)*
-; CHECK: load float, float addrspace(3)*
+; CHECK: addrspacecast ptr {{%.*}} to ptr addrspace(3)
+; CHECK: getelementptr inbounds float, ptr addrspace(3)
+; CHECK: load float, ptr addrspace(3)
 ; CHECK: if.end:
-; CHECK: getelementptr inbounds float, float*
-; CHECK: load float, float*
-define float @g0(i32 %c, float* %p) {
+; CHECK: getelementptr inbounds float, ptr
+; CHECK: load float, ptr
+define float @g0(i32 %c, ptr %p) {
 entry:
   %tobool.not = icmp eq i32 %c, 0
   br i1 %tobool.not, label %if.end, label %if.then
 
 if.then:
-  %0 = bitcast float* %p to i8*
-  %1 = call i1 @llvm.amdgcn.is.shared(i8* %0)
-  tail call void @llvm.assume(i1 %1)
-  %2 = tail call i32 @llvm.amdgcn.workitem.id.x()
-  %idxprom = zext i32 %2 to i64
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
-  %3 = load float, float* %arrayidx, align 4
-  %add = fadd float %3, 0.
+  %0 = call i1 @llvm.amdgcn.is.shared(ptr %p)
+  tail call void @llvm.assume(i1 %0)
+  %1 = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %idxprom = zext i32 %1 to i64
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %2 = load float, ptr %arrayidx, align 4
+  %add = fadd float %2, 0.
   br label %if.end
 
 if.end:
   %s = phi float [ %add, %if.then ], [ 0., %entry ]
-  %4 = tail call i32 @llvm.amdgcn.workitem.id.y()
-  %idxprom2 = zext i32 %4 to i64
-  %arrayidx2 = getelementptr inbounds float, float* %p, i64 %idxprom2
-  %5 = load float, float* %arrayidx2, align 4
-  %add2 = fadd float %s, %5
+  %3 = tail call i32 @llvm.amdgcn.workitem.id.y()
+  %idxprom2 = zext i32 %3 to i64
+  %arrayidx2 = getelementptr inbounds float, ptr %p, i64 %idxprom2
+  %4 = load float, ptr %arrayidx2, align 4
+  %add2 = fadd float %s, %4
   ret float %add2
 }
 
 declare void @llvm.assume(i1)
-declare i1 @llvm.amdgcn.is.shared(i8* nocapture)
-declare i1 @llvm.amdgcn.is.private(i8* nocapture)
+declare i1 @llvm.amdgcn.is.shared(ptr nocapture)
+declare i1 @llvm.amdgcn.is.private(ptr nocapture)
 declare i32 @llvm.amdgcn.workitem.id.x()
 declare i32 @llvm.amdgcn.workitem.id.y()

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/debug-info.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/debug-info.ll
index cd2d2e891f2b9..c8170e4f880c7 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/debug-info.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/debug-info.ll
@@ -5,72 +5,71 @@
 @lds = internal unnamed_addr addrspace(3) global [648 x double] undef, align 8
 
 ; CHECK-LABEL: @load_global_from_flat(
-; CHECK-NEXT: %tmp0 = addrspacecast float* %generic_scalar to float addrspace(1)*, !dbg ![[DEBUG_LOC_TMP0:[0-9]+]]
-; CHECK-NEXT: %tmp1 = load float, float addrspace(1)* %tmp0, align 4, !dbg ![[DEBUG_LOC_TMP1:[0-9]+]]
+; CHECK-NEXT: %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(1), !dbg ![[DEBUG_LOC_TMP0:[0-9]+]]
+; CHECK-NEXT: %tmp1 = load float, ptr addrspace(1) %tmp0, align 4, !dbg ![[DEBUG_LOC_TMP1:[0-9]+]]
 ; CHECK-NEXT: ret float %tmp1, !dbg ![[DEBUG_LOC_RET:[0-9]+]]
-define float @load_global_from_flat(float* %generic_scalar) #0 !dbg !5 {
-  %tmp0 = addrspacecast float* %generic_scalar to float addrspace(1)*, !dbg !8
-  %tmp1 = load float, float addrspace(1)* %tmp0, align 4, !dbg !9
+define float @load_global_from_flat(ptr %generic_scalar) #0 !dbg !5 {
+  %tmp0 = addrspacecast ptr %generic_scalar to ptr addrspace(1), !dbg !8
+  %tmp1 = load float, ptr addrspace(1) %tmp0, align 4, !dbg !9
   ret float %tmp1, !dbg !10
 }
 
 ; CHECK-LABEL: @simplified_constexpr_gep_addrspacecast(
-; CHECK: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0, !dbg ![[DEBUG_LOC_GEP0:[0-9]+]]
-; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep0, align 8, !dbg ![[DEBUG_LOC_STORE_GEP0:[0-9]+]]
+; CHECK: %gep0 = getelementptr inbounds double, ptr addrspace(3) getelementptr inbounds ([648 x double], ptr addrspace(3) @lds, i64 0, i64 384), i64 %idx0, !dbg ![[DEBUG_LOC_GEP0:[0-9]+]]
+; CHECK-NEXT: store double 1.000000e+00, ptr addrspace(3) %gep0, align 8, !dbg ![[DEBUG_LOC_STORE_GEP0:[0-9]+]]
 define void @simplified_constexpr_gep_addrspacecast(i64 %idx0, i64 %idx1) #0 !dbg !11 {
-  %gep0 = getelementptr inbounds double, double* addrspacecast (double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384) to double*), i64 %idx0, !dbg !12
-  %asc = addrspacecast double* %gep0 to double addrspace(3)*, !dbg !13
-  store double 1.000000e+00, double addrspace(3)* %asc, align 8, !dbg !14
+  %gep0 = getelementptr inbounds double, ptr addrspacecast (ptr addrspace(3) getelementptr inbounds ([648 x double], ptr addrspace(3) @lds, i64 0, i64 384) to ptr), i64 %idx0, !dbg !12
+  %asc = addrspacecast ptr %gep0 to ptr addrspace(3), !dbg !13
+  store double 1.000000e+00, ptr addrspace(3) %asc, align 8, !dbg !14
   ret void, !dbg !15
 }
 
 ; CHECK-LABEL: @objectsize_group_to_flat_i32(
-; CHECK: %val = call i32 @llvm.objectsize.i32.p3i8(i8 addrspace(3)* %group.ptr, i1 true, i1 false, i1 false), !dbg ![[DEBUG_LOC_VAL:[0-9]+]]
-define i32 @objectsize_group_to_flat_i32(i8 addrspace(3)* %group.ptr) #0 !dbg !16 {
-  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*, !dbg !17
-  %val = call i32 @llvm.objectsize.i32.p0i8(i8* %cast, i1 true, i1 false, i1 false), !dbg !18
+; CHECK: %val = call i32 @llvm.objectsize.i32.p3(ptr addrspace(3) %group.ptr, i1 true, i1 false, i1 false), !dbg ![[DEBUG_LOC_VAL:[0-9]+]]
+define i32 @objectsize_group_to_flat_i32(ptr addrspace(3) %group.ptr) #0 !dbg !16 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr, !dbg !17
+  %val = call i32 @llvm.objectsize.i32.p0(ptr %cast, i1 true, i1 false, i1 false), !dbg !18
   ret i32 %val, !dbg !19
 }
 
 ; CHECK-LABEL: @memset_group_to_flat(
-; CHECK: call void @llvm.memset.p3i8.i64(i8 addrspace(3)* align 4 %group.ptr, i8 4, i64 32, i1 false), !dbg ![[DEBUG_LOC_MEMSET_CAST:[0-9]+]]
-define amdgpu_kernel void @memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 !dbg !20 {
-  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*, !dbg !21
-  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 32, i1 false), !dbg !22, !tbaa !23, !alias.scope !26, !noalias !29
+; CHECK: call void @llvm.memset.p3.i64(ptr addrspace(3) align 4 %group.ptr, i8 4, i64 32, i1 false), !dbg ![[DEBUG_LOC_MEMSET_CAST:[0-9]+]]
+define amdgpu_kernel void @memset_group_to_flat(ptr addrspace(3) %group.ptr, i32 %y) #0 !dbg !20 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr, !dbg !21
+  call void @llvm.memset.p0.i64(ptr align 4 %cast, i8 4, i64 32, i1 false), !dbg !22, !tbaa !23, !alias.scope !26, !noalias !29
   ret void, !dbg !31
 }
 
 ; CHECK-LABEL: @ptrmask_cast_global_to_flat(
-; CHECK-NEXT:    [[PTRMASK:%.*]] = call i8 addrspace(1)* @llvm.ptrmask.p1i8.i64(i8 addrspace(1)* %src.ptr, i64 %mask), !dbg ![[DEBUG_LOC_PTRMASK:[0-9]+]]
-; CHECK-NEXT:    %load = load i8, i8 addrspace(1)* [[PTRMASK]], align 1, !dbg ![[DEBUG_LOC_LOAD:[0-9]+]]
-define i8 @ptrmask_cast_global_to_flat(i8 addrspace(1)* %src.ptr, i64 %mask) #0 !dbg !32 {
-  %cast = addrspacecast i8 addrspace(1)* %src.ptr to i8*, !dbg !33
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %mask), !dbg !34
-  %load = load i8, i8* %masked, !dbg !35
+; CHECK-NEXT:    [[PTRMASK:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) %src.ptr, i64 %mask), !dbg ![[DEBUG_LOC_PTRMASK:[0-9]+]]
+; CHECK-NEXT:    %load = load i8, ptr addrspace(1) [[PTRMASK]], align 1, !dbg ![[DEBUG_LOC_LOAD:[0-9]+]]
+define i8 @ptrmask_cast_global_to_flat(ptr addrspace(1) %src.ptr, i64 %mask) #0 !dbg !32 {
+  %cast = addrspacecast ptr addrspace(1) %src.ptr to ptr, !dbg !33
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %mask), !dbg !34
+  %load = load i8, ptr %masked, !dbg !35
   ret i8 %load, !dbg !36
 }
 
 ; the new addrspacecast gets the debug location from it user (in this case, the gep)
 ; CHECK-LABEL: @assume_addresspace(
-; CHECK: [[ASCAST:%.*]] = addrspacecast float* %p to float addrspace(3)*, !dbg ![[DEBUG_LOC_ARRAYIDX:[0-9]+]]
-; CHECK-NEXT: %arrayidx = getelementptr inbounds float, float addrspace(3)* [[ASCAST]], i64 %x64, !dbg ![[DEBUG_LOC_ARRAYIDX]]
-; CHECK-NEXT: %arrayidx.load = load float, float addrspace(3)* %arrayidx, align 4, !dbg ![[DEBUG_LOC_ARRAYIDX_LOAD:[0-9]+]]
-define float @assume_addresspace(float* %p) !dbg !37 {
+; CHECK: [[ASCAST:%.*]] = addrspacecast ptr %p to ptr addrspace(3), !dbg ![[DEBUG_LOC_ARRAYIDX:[0-9]+]]
+; CHECK-NEXT: %arrayidx = getelementptr inbounds float, ptr addrspace(3) [[ASCAST]], i64 %x64, !dbg ![[DEBUG_LOC_ARRAYIDX]]
+; CHECK-NEXT: %arrayidx.load = load float, ptr addrspace(3) %arrayidx, align 4, !dbg ![[DEBUG_LOC_ARRAYIDX_LOAD:[0-9]+]]
+define float @assume_addresspace(ptr %p) !dbg !37 {
 entry:
-  %cast = bitcast float* %p to i8*, !dbg !38
-  %is_shared = call i1 @llvm.amdgcn.is.shared(i8* %cast), !dbg !39
+  %is_shared = call i1 @llvm.amdgcn.is.shared(ptr %p), !dbg !39
   tail call void @llvm.assume(i1 %is_shared), !dbg !40
   %x32 = tail call i32 @llvm.amdgcn.workitem.id.x(), !dbg !41
   %x64 = zext i32 %x32 to i64, !dbg !42
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %x64, !dbg !43
-  %arrayidx.load = load float, float* %arrayidx, align 4, !dbg !44
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %x64, !dbg !43
+  %arrayidx.load = load float, ptr %arrayidx, align 4, !dbg !44
   ret float %arrayidx.load, !dbg !45
 }
 
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg) #1
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #2
-declare i8* @llvm.ptrmask.p0i8.i64(i8*, i64) #3
-declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #4
+declare i32 @llvm.objectsize.i32.p0(ptr, i1 immarg, i1 immarg, i1 immarg) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
+declare ptr @llvm.ptrmask.p0.i64(ptr, i64) #3
+declare i1 @llvm.amdgcn.is.shared(ptr nocapture) #4
 declare i32 @llvm.amdgcn.workitem.id.x() #4
 declare void @llvm.assume(i1)
 

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll
index 9fe7e86ec9b8b..5eb53ed2f7546 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/flat_atomic.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=amdgcn -mcpu=gfx90a < %s | FileCheck %s
 
-declare double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* nocapture, double) #8
-declare double @llvm.amdgcn.flat.atomic.fmin.f64.p0f64.f64(double* nocapture, double) #8
-declare double @llvm.amdgcn.flat.atomic.fmax.f64.p0f64.f64(double* nocapture, double) #8
+declare double @llvm.amdgcn.flat.atomic.fadd.f64.p0.f64(ptr nocapture, double) #8
+declare double @llvm.amdgcn.flat.atomic.fmin.f64.p0.f64(ptr nocapture, double) #8
+declare double @llvm.amdgcn.flat.atomic.fmax.f64.p0.f64(ptr nocapture, double) #8
 
-define protected amdgpu_kernel void @InferNothing(i32 %a, double* %b, double %c) {
+define protected amdgpu_kernel void @InferNothing(i32 %a, ptr %b, double %c) {
 ; CHECK-LABEL: InferNothing:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x2c
@@ -24,13 +24,13 @@ define protected amdgpu_kernel void @InferNothing(i32 %a, double* %b, double %c)
 entry:
   %i = add nsw i32 %a, -1
   %i.2 = sext i32 %i to i64
-  %i.3 = getelementptr inbounds double, double* %b, i64 %i.2
-  %i.4 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %i.3, double %c) #8
+  %i.3 = getelementptr inbounds double, ptr %b, i64 %i.2
+  %i.4 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0.f64(ptr %i.3, double %c) #8
   ret void
 }
 
 
-define protected amdgpu_kernel void @InferFadd(i32 %a, double addrspace(1)* %b, double %c) {
+define protected amdgpu_kernel void @InferFadd(i32 %a, ptr addrspace(1) %b, double %c) {
 ; CHECK-LABEL: InferFadd:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    s_load_dword s2, s[0:1], 0x24
@@ -48,13 +48,13 @@ define protected amdgpu_kernel void @InferFadd(i32 %a, double addrspace(1)* %b,
 entry:
   %i = add nsw i32 %a, -1
   %i.2 = sext i32 %i to i64
-  %i.3 = getelementptr inbounds double, double addrspace(1)* %b, i64 %i.2
-  %i.4 = addrspacecast double addrspace(1)* %i.3 to double*
-  %i.5 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %i.4, double %c) #8
+  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
+  %i.4 = addrspacecast ptr addrspace(1) %i.3 to ptr
+  %i.5 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0.f64(ptr %i.4, double %c) #8
   ret void
 }
 
-define protected amdgpu_kernel void @InferFmax(i32 %a, double addrspace(1)* %b, double %c) {
+define protected amdgpu_kernel void @InferFmax(i32 %a, ptr addrspace(1) %b, double %c) {
 ; CHECK-LABEL: InferFmax:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    s_load_dword s2, s[0:1], 0x24
@@ -72,13 +72,13 @@ define protected amdgpu_kernel void @InferFmax(i32 %a, double addrspace(1)* %b,
 entry:
   %i = add nsw i32 %a, -1
   %i.2 = sext i32 %i to i64
-  %i.3 = getelementptr inbounds double, double addrspace(1)* %b, i64 %i.2
-  %i.4 = addrspacecast double addrspace(1)* %i.3 to double*
-  %i.5 = tail call contract double @llvm.amdgcn.flat.atomic.fmax.f64.p0f64.f64(double* %i.4, double %c) #8
+  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
+  %i.4 = addrspacecast ptr addrspace(1) %i.3 to ptr
+  %i.5 = tail call contract double @llvm.amdgcn.flat.atomic.fmax.f64.p0.f64(ptr %i.4, double %c) #8
   ret void
 }
 
-define protected amdgpu_kernel void @InferFmin(i32 %a, double addrspace(1)* %b, double %c) {
+define protected amdgpu_kernel void @InferFmin(i32 %a, ptr addrspace(1) %b, double %c) {
 ; CHECK-LABEL: InferFmin:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    s_load_dword s2, s[0:1], 0x24
@@ -96,13 +96,13 @@ define protected amdgpu_kernel void @InferFmin(i32 %a, double addrspace(1)* %b,
 entry:
   %i = add nsw i32 %a, -1
   %i.2 = sext i32 %i to i64
-  %i.3 = getelementptr inbounds double, double addrspace(1)* %b, i64 %i.2
-  %i.4 = addrspacecast double addrspace(1)* %i.3 to double*
-  %i.5 = tail call contract double @llvm.amdgcn.flat.atomic.fmin.f64.p0f64.f64(double* %i.4, double %c) #8
+  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
+  %i.4 = addrspacecast ptr addrspace(1) %i.3 to ptr
+  %i.5 = tail call contract double @llvm.amdgcn.flat.atomic.fmin.f64.p0.f64(ptr %i.4, double %c) #8
   ret void
 }
 
-define protected amdgpu_kernel void @InferMixed(i32 %a, double addrspace(1)* %b, double %c, double* %d) {
+define protected amdgpu_kernel void @InferMixed(i32 %a, ptr addrspace(1) %b, double %c, ptr %d) {
 ; CHECK-LABEL: InferMixed:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    s_load_dword s2, s[0:1], 0x24
@@ -123,20 +123,20 @@ define protected amdgpu_kernel void @InferMixed(i32 %a, double addrspace(1)* %b,
 entry:
   %i = add nsw i32 %a, -1
   %i.2 = sext i32 %i to i64
-  %i.3 = getelementptr inbounds double, double addrspace(1)* %b, i64 %i.2
+  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
   br label %bb1
 
 bb1:
-  %i.7 = ptrtoint double addrspace(1)* %i.3 to i64
+  %i.7 = ptrtoint ptr addrspace(1) %i.3 to i64
   %i.8 = add nsw i64 %i.7, 1
-  %i.9 = inttoptr i64 %i.8 to double addrspace(1)*
-  %i.10 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double * %d, double %c) #23
-  %i.11 = addrspacecast double addrspace(1)* %i.9 to double*
-  %i.12 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %i.11, double %c) #23
+  %i.9 = inttoptr i64 %i.8 to ptr addrspace(1)
+  %i.10 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0.f64(ptr %d, double %c) #23
+  %i.11 = addrspacecast ptr addrspace(1) %i.9 to ptr
+  %i.12 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0.f64(ptr %i.11, double %c) #23
   ret void
 }
 
-define protected amdgpu_kernel void @InferPHI(i32 %a, double addrspace(1)* %b, double %c) {
+define protected amdgpu_kernel void @InferPHI(i32 %a, ptr addrspace(1) %b, double %c) {
 ; CHECK-LABEL: InferPHI:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    s_load_dword s2, s[0:1], 0x24
@@ -161,21 +161,21 @@ define protected amdgpu_kernel void @InferPHI(i32 %a, double addrspace(1)* %b, d
 entry:
   %i = add nsw i32 %a, -1
   %i.2 = sext i32 %i to i64
-  %i.3 = getelementptr inbounds double, double addrspace(1)* %b, i64 %i.2
-  %i.4 = ptrtoint double addrspace(1)* %i.3 to i64
+  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
+  %i.4 = ptrtoint ptr addrspace(1) %i.3 to i64
   br label %bb0
 
 bb0:
-  %phi = phi double addrspace(1)* [ %i.3, %entry ], [ %i.9, %bb0 ]
-  %i.7 = ptrtoint double addrspace(1)* %phi to i64
+  %phi = phi ptr addrspace(1) [ %i.3, %entry ], [ %i.9, %bb0 ]
+  %i.7 = ptrtoint ptr addrspace(1) %phi to i64
   %i.8 = sub nsw i64 %i.7, 1
   %cmp2 = icmp eq i64 %i.8, 0
-  %i.9 = inttoptr i64 %i.7 to double addrspace(1)*
+  %i.9 = inttoptr i64 %i.7 to ptr addrspace(1)
   br i1 %cmp2, label %bb1, label %bb0
 
 bb1:
-  %i.10 = addrspacecast double addrspace(1)* %i.9 to double*
-  %i.11 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0f64.f64(double* %i.10, double %c) #23
+  %i.10 = addrspacecast ptr addrspace(1) %i.9 to ptr
+  %i.11 = tail call contract double @llvm.amdgcn.flat.atomic.fadd.f64.p0.f64(ptr %i.10, double %c) #23
   ret void
 }
 

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll
index 4829759340278..03fc645a72052 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/icmp.ll
@@ -1,60 +1,60 @@
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
 
 ; CHECK-LABEL: @icmp_flat_cmp_self(
-; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, %group.ptr.0
-define i1 @icmp_flat_cmp_self(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* %cast0, %cast0
+; CHECK: %cmp = icmp eq ptr addrspace(3) %group.ptr.0, %group.ptr.0
+define i1 @icmp_flat_cmp_self(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr %cast0, %cast0
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_flat_flat_from_group(
-; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, %group.ptr.1
-define i1 @icmp_flat_flat_from_group(i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
-  %cmp = icmp eq i32* %cast0, %cast1
+; CHECK: %cmp = icmp eq ptr addrspace(3) %group.ptr.0, %group.ptr.1
+define i1 @icmp_flat_flat_from_group(ptr addrspace(3) %group.ptr.0, ptr addrspace(3) %group.ptr.1) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cast1 = addrspacecast ptr addrspace(3) %group.ptr.1 to ptr
+  %cmp = icmp eq ptr %cast0, %cast1
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_mismatch_flat_from_group_private(
-; CHECK: %cast0 = addrspacecast i32 addrspace(5)* %private.ptr.0 to i32*
-; CHECK: %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
-; CHECK: %cmp = icmp eq i32* %cast0, %cast1
-define i1 @icmp_mismatch_flat_from_group_private(i32 addrspace(5)* %private.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
-  %cast0 = addrspacecast i32 addrspace(5)* %private.ptr.0 to i32*
-  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
-  %cmp = icmp eq i32* %cast0, %cast1
+; CHECK: %cast0 = addrspacecast ptr addrspace(5) %private.ptr.0 to ptr
+; CHECK: %cast1 = addrspacecast ptr addrspace(3) %group.ptr.1 to ptr
+; CHECK: %cmp = icmp eq ptr %cast0, %cast1
+define i1 @icmp_mismatch_flat_from_group_private(ptr addrspace(5) %private.ptr.0, ptr addrspace(3) %group.ptr.1) #0 {
+  %cast0 = addrspacecast ptr addrspace(5) %private.ptr.0 to ptr
+  %cast1 = addrspacecast ptr addrspace(3) %group.ptr.1 to ptr
+  %cmp = icmp eq ptr %cast0, %cast1
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_flat_group_flat(
-; CHECK: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-; CHECK: %cmp = icmp eq i32* %cast0, %flat.ptr.1
-define i1 @icmp_flat_group_flat(i32 addrspace(3)* %group.ptr.0, i32* %flat.ptr.1) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* %cast0, %flat.ptr.1
+; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+; CHECK: %cmp = icmp eq ptr %cast0, %flat.ptr.1
+define i1 @icmp_flat_group_flat(ptr addrspace(3) %group.ptr.0, ptr %flat.ptr.1) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr %cast0, %flat.ptr.1
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_flat_flat_group(
-; CHECK: %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
-; CHECK: %cmp = icmp eq i32* %flat.ptr.0, %cast1
-define i1 @icmp_flat_flat_group(i32* %flat.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
-  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
-  %cmp = icmp eq i32* %flat.ptr.0, %cast1
+; CHECK: %cast1 = addrspacecast ptr addrspace(3) %group.ptr.1 to ptr
+; CHECK: %cmp = icmp eq ptr %flat.ptr.0, %cast1
+define i1 @icmp_flat_flat_group(ptr %flat.ptr.0, ptr addrspace(3) %group.ptr.1) #0 {
+  %cast1 = addrspacecast ptr addrspace(3) %group.ptr.1 to ptr
+  %cmp = icmp eq ptr %flat.ptr.0, %cast1
   ret i1 %cmp
 }
 
 ; Keeping as cmp addrspace(3)* is better
 ; CHECK-LABEL: @icmp_flat_to_group_cmp(
-; CHECK: %cast0 = addrspacecast i32* %flat.ptr.0 to i32 addrspace(3)*
-; CHECK: %cast1 = addrspacecast i32* %flat.ptr.1 to i32 addrspace(3)*
-; CHECK: %cmp = icmp eq i32 addrspace(3)* %cast0, %cast1
-define i1 @icmp_flat_to_group_cmp(i32* %flat.ptr.0, i32* %flat.ptr.1) #0 {
-  %cast0 = addrspacecast i32* %flat.ptr.0 to i32 addrspace(3)*
-  %cast1 = addrspacecast i32* %flat.ptr.1 to i32 addrspace(3)*
-  %cmp = icmp eq i32 addrspace(3)* %cast0, %cast1
+; CHECK: %cast0 = addrspacecast ptr %flat.ptr.0 to ptr addrspace(3)
+; CHECK: %cast1 = addrspacecast ptr %flat.ptr.1 to ptr addrspace(3)
+; CHECK: %cmp = icmp eq ptr addrspace(3) %cast0, %cast1
+define i1 @icmp_flat_to_group_cmp(ptr %flat.ptr.0, ptr %flat.ptr.1) #0 {
+  %cast0 = addrspacecast ptr %flat.ptr.0 to ptr addrspace(3)
+  %cast1 = addrspacecast ptr %flat.ptr.1 to ptr addrspace(3)
+  %cmp = icmp eq ptr addrspace(3) %cast0, %cast1
   ret i1 %cmp
 }
 
@@ -62,35 +62,35 @@ define i1 @icmp_flat_to_group_cmp(i32* %flat.ptr.0, i32* %flat.ptr.1) #0 {
 ; constant cast if this is OK to change if 0 is a valid pointer.
 
 ; CHECK-LABEL: @icmp_group_flat_cmp_null(
-; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, addrspacecast (i32* null to i32 addrspace(3)*)
-define i1 @icmp_group_flat_cmp_null(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* %cast0, null
+; CHECK: %cmp = icmp eq ptr addrspace(3) %group.ptr.0, addrspacecast (ptr null to ptr addrspace(3))
+define i1 @icmp_group_flat_cmp_null(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr %cast0, null
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_group_flat_cmp_constant_inttoptr(
-; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, addrspacecast (i32* inttoptr (i64 400 to i32*) to i32 addrspace(3)*)
-define i1 @icmp_group_flat_cmp_constant_inttoptr(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* %cast0, inttoptr (i64 400 to i32*)
+; CHECK: %cmp = icmp eq ptr addrspace(3) %group.ptr.0, addrspacecast (ptr inttoptr (i64 400 to ptr) to ptr addrspace(3))
+define i1 @icmp_group_flat_cmp_constant_inttoptr(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr %cast0, inttoptr (i64 400 to ptr)
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_null(
-; CHECK: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-; CHECK: %cmp = icmp eq i32* %cast0, addrspacecast (i32 addrspace(5)* null to i32*)
-define i1 @icmp_mismatch_flat_group_private_cmp_null(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* %cast0, addrspacecast (i32 addrspace(5)* null to i32*)
+; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+; CHECK: %cmp = icmp eq ptr %cast0, addrspacecast (ptr addrspace(5) null to ptr)
+define i1 @icmp_mismatch_flat_group_private_cmp_null(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr %cast0, addrspacecast (ptr addrspace(5) null to ptr)
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_undef(
-; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, undef
-define i1 @icmp_mismatch_flat_group_private_cmp_undef(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* %cast0, addrspacecast (i32 addrspace(5)* undef to i32*)
+; CHECK: %cmp = icmp eq ptr addrspace(3) %group.ptr.0, undef
+define i1 @icmp_mismatch_flat_group_private_cmp_undef(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr %cast0, addrspacecast (ptr addrspace(5) undef to ptr)
   ret i1 %cmp
 }
 
@@ -98,62 +98,62 @@ define i1 @icmp_mismatch_flat_group_private_cmp_undef(i32 addrspace(3)* %group.p
 @global0 = internal addrspace(1) global i32 0, align 4
 
 ; CHECK-LABEL: @icmp_mismatch_flat_group_global_cmp_gv(
-; CHECK: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-; CHECK: %cmp = icmp eq i32* %cast0, addrspacecast (i32 addrspace(1)* @global0 to i32*)
-define i1 @icmp_mismatch_flat_group_global_cmp_gv(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* %cast0, addrspacecast (i32 addrspace(1)* @global0 to i32*)
+; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+; CHECK: %cmp = icmp eq ptr %cast0, addrspacecast (ptr addrspace(1) @global0 to ptr)
+define i1 @icmp_mismatch_flat_group_global_cmp_gv(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr %cast0, addrspacecast (ptr addrspace(1) @global0 to ptr)
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_mismatch_group_global_cmp_gv_gv(
-; CHECK: %cmp = icmp eq i32* addrspacecast (i32 addrspace(3)* @lds0 to i32*), addrspacecast (i32 addrspace(1)* @global0 to i32*)
-define i1 @icmp_mismatch_group_global_cmp_gv_gv(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cmp = icmp eq i32* addrspacecast (i32 addrspace(3)* @lds0 to i32*), addrspacecast (i32 addrspace(1)* @global0 to i32*)
+; CHECK: %cmp = icmp eq ptr addrspacecast (ptr addrspace(3) @lds0 to ptr), addrspacecast (ptr addrspace(1) @global0 to ptr)
+define i1 @icmp_mismatch_group_global_cmp_gv_gv(ptr addrspace(3) %group.ptr.0) #0 {
+  %cmp = icmp eq ptr addrspacecast (ptr addrspace(3) @lds0 to ptr), addrspacecast (ptr addrspace(1) @global0 to ptr)
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_group_flat_cmp_undef(
-; CHECK: %cmp = icmp eq i32 addrspace(3)* %group.ptr.0, undef
-define i1 @icmp_group_flat_cmp_undef(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* %cast0, undef
+; CHECK: %cmp = icmp eq ptr addrspace(3) %group.ptr.0, undef
+define i1 @icmp_group_flat_cmp_undef(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr %cast0, undef
   ret i1 %cmp
 }
 
 ; Test non-canonical orders
 ; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_null_swap(
-; CHECK: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-; CHECK: %cmp = icmp eq i32* addrspacecast (i32 addrspace(5)* null to i32*), %cast0
-define i1 @icmp_mismatch_flat_group_private_cmp_null_swap(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* addrspacecast (i32 addrspace(5)* null to i32*), %cast0
+; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+; CHECK: %cmp = icmp eq ptr addrspacecast (ptr addrspace(5) null to ptr), %cast0
+define i1 @icmp_mismatch_flat_group_private_cmp_null_swap(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr addrspacecast (ptr addrspace(5) null to ptr), %cast0
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_group_flat_cmp_undef_swap(
-; CHECK: %cmp = icmp eq i32 addrspace(3)* undef, %group.ptr.0
-define i1 @icmp_group_flat_cmp_undef_swap(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* undef, %cast0
+; CHECK: %cmp = icmp eq ptr addrspace(3) undef, %group.ptr.0
+define i1 @icmp_group_flat_cmp_undef_swap(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr undef, %cast0
   ret i1 %cmp
 }
 
 ; CHECK-LABEL: @icmp_mismatch_flat_group_private_cmp_undef_swap(
-; CHECK: %cmp = icmp eq i32 addrspace(3)* undef, %group.ptr.0
-define i1 @icmp_mismatch_flat_group_private_cmp_undef_swap(i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cmp = icmp eq i32* addrspacecast (i32 addrspace(5)* undef to i32*), %cast0
+; CHECK: %cmp = icmp eq ptr addrspace(3) undef, %group.ptr.0
+define i1 @icmp_mismatch_flat_group_private_cmp_undef_swap(ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cmp = icmp eq ptr addrspacecast (ptr addrspace(5) undef to ptr), %cast0
   ret i1 %cmp
 }
 
 ; TODO: Should be handled
 ; CHECK-LABEL: @icmp_flat_flat_from_group_vector(
-; CHECK: %cmp = icmp eq <2 x i32*> %cast0, %cast1
-define <2 x i1> @icmp_flat_flat_from_group_vector(<2 x i32 addrspace(3)*> %group.ptr.0, <2 x i32 addrspace(3)*> %group.ptr.1) #0 {
-  %cast0 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.0 to <2 x i32*>
-  %cast1 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.1 to <2 x i32*>
-  %cmp = icmp eq <2 x i32*> %cast0, %cast1
+; CHECK: %cmp = icmp eq <2 x ptr> %cast0, %cast1
+define <2 x i1> @icmp_flat_flat_from_group_vector(<2 x ptr addrspace(3)> %group.ptr.0, <2 x ptr addrspace(3)> %group.ptr.1) #0 {
+  %cast0 = addrspacecast <2 x ptr addrspace(3)> %group.ptr.0 to <2 x ptr>
+  %cast1 = addrspacecast <2 x ptr addrspace(3)> %group.ptr.1 to <2 x ptr>
+  %cmp = icmp eq <2 x ptr> %cast0, %cast1
   ret <2 x i1> %cmp
 }
 

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
index 3116fce33d0af..2f6cab0fb7330 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-address-space.ll
@@ -6,163 +6,160 @@
 @array = internal addrspace(3) global [10 x float] zeroinitializer, align 4
 
 ; CHECK-LABEL: @load_store_lds_f32(
-; CHECK: %tmp = load float, float addrspace(3)* @scalar, align 4
+; CHECK: %tmp = load float, ptr addrspace(3) @scalar, align 4
 ; CHECK: call void @use(float %tmp)
-; CHECK: store float %v, float addrspace(3)* @scalar, align 4
+; CHECK: store float %v, ptr addrspace(3) @scalar, align 4
 ; CHECK: call void @llvm.amdgcn.s.barrier()
-; CHECK: %tmp2 = load float, float addrspace(3)* @scalar, align 4
+; CHECK: %tmp2 = load float, ptr addrspace(3) @scalar, align 4
 ; CHECK: call void @use(float %tmp2)
-; CHECK: store float %v, float addrspace(3)* @scalar, align 4
+; CHECK: store float %v, ptr addrspace(3) @scalar, align 4
 ; CHECK: call void @llvm.amdgcn.s.barrier()
-; CHECK: %tmp3 = load float, float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5), align 4
+; CHECK: %tmp3 = load float, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i32 0, i32 5), align 4
 ; CHECK: call void @use(float %tmp3)
-; CHECK: store float %v, float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5), align 4
+; CHECK: store float %v, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i32 0, i32 5), align 4
 ; CHECK: call void @llvm.amdgcn.s.barrier()
-; CHECK: %tmp4 = getelementptr inbounds [10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 5
-; CHECK: %tmp5 = load float, float addrspace(3)* %tmp4, align 4
+; CHECK: %tmp4 = getelementptr inbounds [10 x float], ptr addrspace(3) @array, i32 0, i32 5
+; CHECK: %tmp5 = load float, ptr addrspace(3) %tmp4, align 4
 ; CHECK: call void @use(float %tmp5)
-; CHECK: store float %v, float addrspace(3)* %tmp4, align 4
+; CHECK: store float %v, ptr addrspace(3) %tmp4, align 4
 ; CHECK: call void @llvm.amdgcn.s.barrier()
-; CHECK: %tmp7 = getelementptr inbounds [10 x float], [10 x float] addrspace(3)* @array, i32 0, i32 %i
-; CHECK: %tmp8 = load float, float addrspace(3)* %tmp7, align 4
+; CHECK: %tmp7 = getelementptr inbounds [10 x float], ptr addrspace(3) @array, i32 0, i32 %i
+; CHECK: %tmp8 = load float, ptr addrspace(3) %tmp7, align 4
 ; CHECK: call void @use(float %tmp8)
-; CHECK: store float %v, float addrspace(3)* %tmp7, align 4
+; CHECK: store float %v, ptr addrspace(3) %tmp7, align 4
 ; CHECK: call void @llvm.amdgcn.s.barrier()
 ; CHECK: ret void
 define amdgpu_kernel void @load_store_lds_f32(i32 %i, float %v) #0 {
 bb:
-  %tmp = load float, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
+  %tmp = load float, ptr addrspacecast (ptr addrspace(3) @scalar to ptr), align 4
   call void @use(float %tmp)
-  store float %v, float* addrspacecast (float addrspace(3)* @scalar to float*), align 4
+  store float %v, ptr addrspacecast (ptr addrspace(3) @scalar to ptr), align 4
   call void @llvm.amdgcn.s.barrier()
-  %tmp1 = addrspacecast float addrspace(3)* @scalar to float*
-  %tmp2 = load float, float* %tmp1, align 4
+  %tmp1 = addrspacecast ptr addrspace(3) @scalar to ptr
+  %tmp2 = load float, ptr %tmp1, align 4
   call void @use(float %tmp2)
-  store float %v, float* %tmp1, align 4
+  store float %v, ptr %tmp1, align 4
   call void @llvm.amdgcn.s.barrier()
-  %tmp3 = load float, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
+  %tmp3 = load float, ptr getelementptr inbounds ([10 x float], ptr addrspacecast (ptr addrspace(3) @array to ptr), i32 0, i32 5), align 4
   call void @use(float %tmp3)
-  store float %v, float* getelementptr inbounds ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5), align 4
+  store float %v, ptr getelementptr inbounds ([10 x float], ptr addrspacecast (ptr addrspace(3) @array to ptr), i32 0, i32 5), align 4
   call void @llvm.amdgcn.s.barrier()
-  %tmp4 = getelementptr inbounds [10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
-  %tmp5 = load float, float* %tmp4, align 4
+  %tmp4 = getelementptr inbounds [10 x float], ptr addrspacecast (ptr addrspace(3) @array to ptr), i32 0, i32 5
+  %tmp5 = load float, ptr %tmp4, align 4
   call void @use(float %tmp5)
-  store float %v, float* %tmp4, align 4
+  store float %v, ptr %tmp4, align 4
   call void @llvm.amdgcn.s.barrier()
-  %tmp6 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float]*
-  %tmp7 = getelementptr inbounds [10 x float], [10 x float]* %tmp6, i32 0, i32 %i
-  %tmp8 = load float, float* %tmp7, align 4
+  %tmp6 = addrspacecast ptr addrspace(3) @array to ptr
+  %tmp7 = getelementptr inbounds [10 x float], ptr %tmp6, i32 0, i32 %i
+  %tmp8 = load float, ptr %tmp7, align 4
   call void @use(float %tmp8)
-  store float %v, float* %tmp7, align 4
+  store float %v, ptr %tmp7, align 4
   call void @llvm.amdgcn.s.barrier()
   ret void
 }
 
 ; CHECK-LABEL: @constexpr_load_int_from_float_lds(
-; CHECK: %tmp = load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*), align 4
+; CHECK: %tmp = load i32, ptr addrspace(3) @scalar, align 4
 define i32 @constexpr_load_int_from_float_lds() #0 {
 bb:
-  %tmp = load i32, i32* addrspacecast (i32 addrspace(3)* bitcast (float addrspace(3)* @scalar to i32 addrspace(3)*) to i32*), align 4
+  %tmp = load i32, ptr addrspacecast (ptr addrspace(3) @scalar to ptr), align 4
   ret i32 %tmp
 }
 
 ; CHECK-LABEL: @load_int_from_global_float(
-; CHECK: %tmp1 = getelementptr float, float addrspace(1)* %input, i32 %i
-; CHECK: %tmp2 = getelementptr float, float addrspace(1)* %tmp1, i32 %j
-; CHECK: %tmp3 = bitcast float addrspace(1)* %tmp2 to i32 addrspace(1)*
-; CHECK: %tmp4 = load i32, i32 addrspace(1)* %tmp3
+; CHECK: %tmp1 = getelementptr float, ptr addrspace(1) %input, i32 %i
+; CHECK: %tmp2 = getelementptr float, ptr addrspace(1) %tmp1, i32 %j
+; CHECK: %tmp4 = load i32, ptr addrspace(1) %tmp2
 ; CHECK: ret i32 %tmp4
-define i32 @load_int_from_global_float(float addrspace(1)* %input, i32 %i, i32 %j) #0 {
+define i32 @load_int_from_global_float(ptr addrspace(1) %input, i32 %i, i32 %j) #0 {
 bb:
-  %tmp = addrspacecast float addrspace(1)* %input to float*
-  %tmp1 = getelementptr float, float* %tmp, i32 %i
-  %tmp2 = getelementptr float, float* %tmp1, i32 %j
-  %tmp3 = bitcast float* %tmp2 to i32*
-  %tmp4 = load i32, i32* %tmp3
+  %tmp = addrspacecast ptr addrspace(1) %input to ptr
+  %tmp1 = getelementptr float, ptr %tmp, i32 %i
+  %tmp2 = getelementptr float, ptr %tmp1, i32 %j
+  %tmp4 = load i32, ptr %tmp2
   ret i32 %tmp4
 }
 
 ; CHECK-LABEL: @nested_const_expr(
-; CHECK: store i32 1, i32 addrspace(3)* bitcast (float addrspace(3)* getelementptr inbounds ([10 x float], [10 x float] addrspace(3)* @array, i64 0, i64 1) to i32 addrspace(3)*), align 4
+; CHECK: store i32 1, ptr addrspace(3) getelementptr inbounds ([10 x float], ptr addrspace(3) @array, i64 0, i64 1), align 4
 define amdgpu_kernel void @nested_const_expr() #0 {
-  store i32 1, i32* bitcast (float* getelementptr ([10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i64 0, i64 1) to i32*), align 4
+  store i32 1, ptr bitcast (ptr getelementptr ([10 x float], ptr addrspacecast (ptr addrspace(3) @array to ptr), i64 0, i64 1) to ptr), align 4
+
   ret void
 }
 
 ; CHECK-LABEL: @rauw(
-; CHECK: %addr = getelementptr float, float addrspace(1)* %input, i64 10
-; CHECK-NEXT: %v = load float, float addrspace(1)* %addr
-; CHECK-NEXT: store float %v, float addrspace(1)* %addr
+; CHECK: %addr = getelementptr float, ptr addrspace(1) %input, i64 10
+; CHECK-NEXT: %v = load float, ptr addrspace(1) %addr
+; CHECK-NEXT: store float %v, ptr addrspace(1) %addr
 ; CHECK-NEXT: ret void
-define amdgpu_kernel void @rauw(float addrspace(1)* %input) #0 {
+define amdgpu_kernel void @rauw(ptr addrspace(1) %input) #0 {
 bb:
-  %generic_input = addrspacecast float addrspace(1)* %input to float*
-  %addr = getelementptr float, float* %generic_input, i64 10
-  %v = load float, float* %addr
-  store float %v, float* %addr
+  %generic_input = addrspacecast ptr addrspace(1) %input to ptr
+  %addr = getelementptr float, ptr %generic_input, i64 10
+  %v = load float, ptr %addr
+  store float %v, ptr %addr
   ret void
 }
 
 ; FIXME: Should be able to eliminate the cast inside the loop
 ; CHECK-LABEL: @loop(
 
-; CHECK: %p = bitcast [10 x float] addrspace(3)* @array to float addrspace(3)*
-; CHECK: %end = getelementptr float, float addrspace(3)* %p, i64 10
+; CHECK: %end = getelementptr float, ptr addrspace(3) @array, i64 10
 ; CHECK: br label %loop
 
 ; CHECK: loop:                                             ; preds = %loop, %entry
-; CHECK: %i = phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
-; CHECK: %v = load float, float addrspace(3)* %i
+; CHECK: %i = phi ptr addrspace(3) [ @array, %entry ], [ %i2, %loop ]
+; CHECK: %v = load float, ptr addrspace(3) %i
 ; CHECK: call void @use(float %v)
-; CHECK: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
-; CHECK: %exit_cond = icmp eq float addrspace(3)* %i2, %end
+; CHECK: %i2 = getelementptr float, ptr addrspace(3) %i, i64 1
+; CHECK: %exit_cond = icmp eq ptr addrspace(3) %i2, %end
 
 ; CHECK: br i1 %exit_cond, label %exit, label %loop
 define amdgpu_kernel void @loop() #0 {
 entry:
-  %p = addrspacecast [10 x float] addrspace(3)* @array to float*
-  %end = getelementptr float, float* %p, i64 10
+  %p = addrspacecast ptr addrspace(3) @array to ptr
+  %end = getelementptr float, ptr %p, i64 10
   br label %loop
 
 loop:                                             ; preds = %loop, %entry
-  %i = phi float* [ %p, %entry ], [ %i2, %loop ]
-  %v = load float, float* %i
+  %i = phi ptr [ %p, %entry ], [ %i2, %loop ]
+  %v = load float, ptr %i
   call void @use(float %v)
-  %i2 = getelementptr float, float* %i, i64 1
-  %exit_cond = icmp eq float* %i2, %end
+  %i2 = getelementptr float, ptr %i, i64 1
+  %exit_cond = icmp eq ptr %i2, %end
   br i1 %exit_cond, label %exit, label %loop
 
 exit:                                             ; preds = %loop
   ret void
 }
 
-@generic_end = external addrspace(1) global float*
+@generic_end = external addrspace(1) global ptr
 
 ; CHECK-LABEL: @loop_with_generic_bound(
-; CHECK: %p = bitcast [10 x float] addrspace(3)* @array to float addrspace(3)*
-; CHECK: %end = load float*, float* addrspace(1)* @generic_end
+; CHECK: %end = load ptr, ptr addrspace(1) @generic_end
 ; CHECK: br label %loop
 
 ; CHECK: loop:
-; CHECK: %i = phi float addrspace(3)* [ %p, %entry ], [ %i2, %loop ]
-; CHECK: %v = load float, float addrspace(3)* %i
+; CHECK: %i = phi ptr addrspace(3) [ @array, %entry ], [ %i2, %loop ]
+; CHECK: %v = load float, ptr addrspace(3) %i
 ; CHECK: call void @use(float %v)
-; CHECK: %i2 = getelementptr float, float addrspace(3)* %i, i64 1
-; CHECK: %0 = addrspacecast float addrspace(3)* %i2 to float*
-; CHECK: %exit_cond = icmp eq float* %0, %end
+; CHECK: %i2 = getelementptr float, ptr addrspace(3) %i, i64 1
+; CHECK: %0 = addrspacecast ptr addrspace(3) %i2 to ptr
+; CHECK: %exit_cond = icmp eq ptr %0, %end
 ; CHECK: br i1 %exit_cond, label %exit, label %loop
 define amdgpu_kernel void @loop_with_generic_bound() #0 {
 entry:
-  %p = addrspacecast [10 x float] addrspace(3)* @array to float*
-  %end = load float*, float* addrspace(1)* @generic_end
+  %p = addrspacecast ptr addrspace(3) @array to ptr
+  %end = load ptr, ptr addrspace(1) @generic_end
   br label %loop
 
 loop:                                             ; preds = %loop, %entry
-  %i = phi float* [ %p, %entry ], [ %i2, %loop ]
-  %v = load float, float* %i
+  %i = phi ptr [ %p, %entry ], [ %i2, %loop ]
+  %v = load float, ptr %i
   call void @use(float %v)
-  %i2 = getelementptr float, float* %i, i64 1
-  %exit_cond = icmp eq float* %i2, %end
+  %i2 = getelementptr float, ptr %i, i64 1
+  %exit_cond = icmp eq ptr %i2, %end
   br i1 %exit_cond, label %exit, label %loop
 
 exit:                                             ; preds = %loop
@@ -170,11 +167,11 @@ exit:                                             ; preds = %loop
 }
 
 ; CHECK-LABEL: @select_bug(
-; CHECK: %add.ptr157 = getelementptr inbounds i64, i64* undef, i64 select (i1 icmp ne (i32* inttoptr (i64 4873 to i32*), i32* null), i64 73, i64 93)
-; CHECK: %cmp169 = icmp uge i64* undef, %add.ptr157
+; CHECK: %add.ptr157 = getelementptr inbounds i64, ptr undef, i64 select (i1 icmp ne (ptr inttoptr (i64 4873 to ptr), ptr null), i64 73, i64 93)
+; CHECK: %cmp169 = icmp uge ptr undef, %add.ptr157
 define void @select_bug() #0 {
-  %add.ptr157 = getelementptr inbounds i64, i64* undef, i64 select (i1 icmp ne (i32* inttoptr (i64 4873 to i32*), i32* null), i64 73, i64 93)
-  %cmp169 = icmp uge i64* undef, %add.ptr157
+  %add.ptr157 = getelementptr inbounds i64, ptr undef, i64 select (i1 icmp ne (ptr inttoptr (i64 4873 to ptr), ptr null), i64 73, i64 93)
+  %cmp169 = icmp uge ptr undef, %add.ptr157
   unreachable
 }
 

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-addrspacecast.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-addrspacecast.ll
index e1bc9258b92a7..dc1e348c51cab 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-addrspacecast.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-addrspacecast.ll
@@ -4,53 +4,52 @@
 ; a memory operation are inferred.
 
 ; CHECK-LABEL: @addrspacecast_gep_addrspacecast(
-; CHECK: %gep0 = getelementptr i32, i32 addrspace(3)* %ptr, i64 9
-; CHECK-NEXT: store i32 8, i32 addrspace(3)* %gep0, align 8
+; CHECK: %gep0 = getelementptr i32, ptr addrspace(3) %ptr, i64 9
+; CHECK-NEXT: store i32 8, ptr addrspace(3) %gep0, align 8
 ; CHECK-NEXT: ret void
-define void @addrspacecast_gep_addrspacecast(i32 addrspace(3)* %ptr) {
-  %asc0 = addrspacecast i32 addrspace(3)* %ptr to i32*
-  %gep0 = getelementptr i32, i32* %asc0, i64 9
-  %asc1 = addrspacecast i32* %gep0 to i32 addrspace(3)*
-  store i32 8, i32 addrspace(3)* %asc1, align 8
+define void @addrspacecast_gep_addrspacecast(ptr addrspace(3) %ptr) {
+  %asc0 = addrspacecast ptr addrspace(3) %ptr to ptr
+  %gep0 = getelementptr i32, ptr %asc0, i64 9
+  %asc1 = addrspacecast ptr %gep0 to ptr addrspace(3)
+  store i32 8, ptr addrspace(3) %asc1, align 8
   ret void
 }
 
 ; CHECK-LABEL: @addrspacecast_different_pointee_type(
-; CHECK: [[GEP:%.*]] = getelementptr i32, i32 addrspace(3)* %ptr, i64 9
-; CHECK: [[CAST:%.*]] = bitcast i32 addrspace(3)* [[GEP]] to i8 addrspace(3)*
-; CHECK-NEXT: store i8 8, i8 addrspace(3)* [[CAST]], align 8
+; CHECK: [[GEP:%.*]] = getelementptr i32, ptr addrspace(3) %ptr, i64 9
+; CHECK-NEXT: store i8 8, ptr addrspace(3) [[GEP]], align 8
 ; CHECK-NEXT: ret void
-define void @addrspacecast_different_pointee_type(i32 addrspace(3)* %ptr) {
-  %asc0 = addrspacecast i32 addrspace(3)* %ptr to i32*
-  %gep0 = getelementptr i32, i32* %asc0, i64 9
-  %asc1 = addrspacecast i32* %gep0 to i8 addrspace(3)*
-  store i8 8, i8 addrspace(3)* %asc1, align 8
+define void @addrspacecast_different_pointee_type(ptr addrspace(3) %ptr) {
+  %asc0 = addrspacecast ptr addrspace(3) %ptr to ptr
+  %gep0 = getelementptr i32, ptr %asc0, i64 9
+  %asc1 = addrspacecast ptr %gep0 to ptr addrspace(3)
+  store i8 8, ptr addrspace(3) %asc1, align 8
   ret void
 }
 
 ; CHECK-LABEL: @addrspacecast_to_memory(
-; CHECK: %gep0 = getelementptr i32, i32 addrspace(3)* %ptr, i64 9
-; CHECK-NEXT: store volatile i32 addrspace(3)* %gep0, i32 addrspace(3)* addrspace(1)* undef
+; CHECK: %gep0 = getelementptr i32, ptr addrspace(3) %ptr, i64 9
+; CHECK-NEXT: store volatile ptr addrspace(3) %gep0, ptr addrspace(1) undef
 ; CHECK-NEXT: ret void
-define void @addrspacecast_to_memory(i32 addrspace(3)* %ptr) {
-  %asc0 = addrspacecast i32 addrspace(3)* %ptr to i32*
-  %gep0 = getelementptr i32, i32* %asc0, i64 9
-  %asc1 = addrspacecast i32* %gep0 to i32 addrspace(3)*
-  store volatile i32 addrspace(3)* %asc1, i32 addrspace(3)* addrspace(1)* undef
+define void @addrspacecast_to_memory(ptr addrspace(3) %ptr) {
+  %asc0 = addrspacecast ptr addrspace(3) %ptr to ptr
+  %gep0 = getelementptr i32, ptr %asc0, i64 9
+  %asc1 = addrspacecast ptr %gep0 to ptr addrspace(3)
+  store volatile ptr addrspace(3) %asc1, ptr addrspace(1) undef
   ret void
 }
 
 ; CHECK-LABEL: @multiuse_addrspacecast_gep_addrspacecast(
-; CHECK: %asc0 = addrspacecast i32 addrspace(3)* %ptr to i32*
-; CHECK-NEXT: store volatile i32* %asc0, i32* addrspace(1)* undef
-; CHECK-NEXT: %gep0 = getelementptr i32, i32 addrspace(3)* %ptr, i64 9
-; CHECK-NEXT: store i32 8, i32 addrspace(3)* %gep0, align 8
+; CHECK: %asc0 = addrspacecast ptr addrspace(3) %ptr to ptr
+; CHECK-NEXT: store volatile ptr %asc0, ptr addrspace(1) undef
+; CHECK-NEXT: %gep0 = getelementptr i32, ptr addrspace(3) %ptr, i64 9
+; CHECK-NEXT: store i32 8, ptr addrspace(3) %gep0, align 8
 ; CHECK-NEXT: ret void
-define void @multiuse_addrspacecast_gep_addrspacecast(i32 addrspace(3)* %ptr) {
-  %asc0 = addrspacecast i32 addrspace(3)* %ptr to i32*
-  store volatile i32* %asc0, i32* addrspace(1)* undef
-  %gep0 = getelementptr i32, i32* %asc0, i64 9
-  %asc1 = addrspacecast i32* %gep0 to i32 addrspace(3)*
-  store i32 8, i32 addrspace(3)* %asc1, align 8
+define void @multiuse_addrspacecast_gep_addrspacecast(ptr addrspace(3) %ptr) {
+  %asc0 = addrspacecast ptr addrspace(3) %ptr to ptr
+  store volatile ptr %asc0, ptr addrspace(1) undef
+  %gep0 = getelementptr i32, ptr %asc0, i64 9
+  %asc1 = addrspacecast ptr %gep0 to ptr addrspace(3)
+  store i32 8, ptr addrspace(3) %asc1, align 8
   ret void
 }

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll
index e5010104e8885..6b8de774d12a0 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll
@@ -6,78 +6,78 @@
 @lds = internal unnamed_addr addrspace(3) global [648 x double] undef, align 8
 
 ; CHECK-LABEL: @simplified_constexpr_gep_addrspacecast(
-; CHECK: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
-; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep0, align 8
+; CHECK: %gep0 = getelementptr inbounds double, ptr addrspace(3) getelementptr inbounds ([648 x double], ptr addrspace(3) @lds, i64 0, i64 384), i64 %idx0
+; CHECK-NEXT: store double 1.000000e+00, ptr addrspace(3) %gep0, align 8
 define void @simplified_constexpr_gep_addrspacecast(i64 %idx0, i64 %idx1) {
-  %gep0 = getelementptr inbounds double, double* addrspacecast (double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384) to double*), i64 %idx0
-  %asc = addrspacecast double* %gep0 to double addrspace(3)*
-  store double 1.000000e+00, double addrspace(3)* %asc, align 8
+  %gep0 = getelementptr inbounds double, ptr addrspacecast (ptr addrspace(3) getelementptr inbounds ([648 x double], ptr addrspace(3) @lds, i64 0, i64 384) to ptr), i64 %idx0
+  %asc = addrspacecast ptr %gep0 to ptr addrspace(3)
+  store double 1.000000e+00, ptr addrspace(3) %asc, align 8
   ret void
 }
 
 ; CHECK-LABEL: @constexpr_gep_addrspacecast(
-; CHECK-NEXT: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
-; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep0, align 8
+; CHECK-NEXT: %gep0 = getelementptr inbounds double, ptr addrspace(3) getelementptr inbounds ([648 x double], ptr addrspace(3) @lds, i64 0, i64 384), i64 %idx0
+; CHECK-NEXT: store double 1.000000e+00, ptr addrspace(3) %gep0, align 8
 define void @constexpr_gep_addrspacecast(i64 %idx0, i64 %idx1) {
-  %gep0 = getelementptr inbounds double, double* getelementptr ([648 x double], [648 x double]* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double]*), i64 0, i64 384), i64 %idx0
-  %asc = addrspacecast double* %gep0 to double addrspace(3)*
-  store double 1.0, double addrspace(3)* %asc, align 8
+  %gep0 = getelementptr inbounds double, ptr getelementptr ([648 x double], ptr addrspacecast (ptr addrspace(3) @lds to ptr), i64 0, i64 384), i64 %idx0
+  %asc = addrspacecast ptr %gep0 to ptr addrspace(3)
+  store double 1.0, ptr addrspace(3) %asc, align 8
   ret void
 }
 
 ; CHECK-LABEL: @constexpr_gep_gep_addrspacecast(
-; CHECK: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
-; CHECK-NEXT: %gep1 = getelementptr inbounds double, double addrspace(3)* %gep0, i64 %idx1
-; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep1, align 8
+; CHECK: %gep0 = getelementptr inbounds double, ptr addrspace(3) getelementptr inbounds ([648 x double], ptr addrspace(3) @lds, i64 0, i64 384), i64 %idx0
+; CHECK-NEXT: %gep1 = getelementptr inbounds double, ptr addrspace(3) %gep0, i64 %idx1
+; CHECK-NEXT: store double 1.000000e+00, ptr addrspace(3) %gep1, align 8
 define void @constexpr_gep_gep_addrspacecast(i64 %idx0, i64 %idx1) {
-  %gep0 = getelementptr inbounds double, double* getelementptr ([648 x double], [648 x double]* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double]*), i64 0, i64 384), i64 %idx0
-  %gep1 = getelementptr inbounds double, double* %gep0, i64 %idx1
-  %asc = addrspacecast double* %gep1 to double addrspace(3)*
-  store double 1.0, double addrspace(3)* %asc, align 8
+  %gep0 = getelementptr inbounds double, ptr getelementptr ([648 x double], ptr addrspacecast (ptr addrspace(3) @lds to ptr), i64 0, i64 384), i64 %idx0
+  %gep1 = getelementptr inbounds double, ptr %gep0, i64 %idx1
+  %asc = addrspacecast ptr %gep1 to ptr addrspace(3)
+  store double 1.0, ptr addrspace(3) %asc, align 8
   ret void
 }
 
 ; Don't crash
 ; CHECK-LABEL: @vector_gep(
-; CHECK: %cast = addrspacecast <4 x [1024 x i32] addrspace(3)*> %array to <4 x [1024 x i32]*>
-define amdgpu_kernel void @vector_gep(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
-  %cast = addrspacecast <4 x [1024 x i32] addrspace(3)*> %array to <4 x [1024 x i32]*>
-  %p = getelementptr [1024 x i32], <4 x [1024 x i32]*> %cast, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
-  %p0 = extractelement <4 x i32*> %p, i32 0
-  %p1 = extractelement <4 x i32*> %p, i32 1
-  %p2 = extractelement <4 x i32*> %p, i32 2
-  %p3 = extractelement <4 x i32*> %p, i32 3
-  store i32 99, i32* %p0
-  store i32 99, i32* %p1
-  store i32 99, i32* %p2
-  store i32 99, i32* %p3
+; CHECK: %cast = addrspacecast <4 x ptr addrspace(3)> %array to <4 x ptr>
+define amdgpu_kernel void @vector_gep(<4 x ptr addrspace(3)> %array) nounwind {
+  %cast = addrspacecast <4 x ptr addrspace(3)> %array to <4 x ptr>
+  %p = getelementptr [1024 x i32], <4 x ptr> %cast, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
+  %p0 = extractelement <4 x ptr> %p, i32 0
+  %p1 = extractelement <4 x ptr> %p, i32 1
+  %p2 = extractelement <4 x ptr> %p, i32 2
+  %p3 = extractelement <4 x ptr> %p, i32 3
+  store i32 99, ptr %p0
+  store i32 99, ptr %p1
+  store i32 99, ptr %p2
+  store i32 99, ptr %p3
   ret void
 }
 
 ; CHECK-LABEL: @repeated_constexpr_gep_addrspacecast(
-; CHECK-NEXT: %gep0 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx0
-; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep0, align 8
-; CHECK-NEXT: %gep1 = getelementptr inbounds double, double addrspace(3)* getelementptr inbounds ([648 x double], [648 x double] addrspace(3)* @lds, i64 0, i64 384), i64 %idx1
-; CHECK-NEXT: store double 1.000000e+00, double addrspace(3)* %gep1, align 8
+; CHECK-NEXT: %gep0 = getelementptr inbounds double, ptr addrspace(3) getelementptr inbounds ([648 x double], ptr addrspace(3) @lds, i64 0, i64 384), i64 %idx0
+; CHECK-NEXT: store double 1.000000e+00, ptr addrspace(3) %gep0, align 8
+; CHECK-NEXT: %gep1 = getelementptr inbounds double, ptr addrspace(3) getelementptr inbounds ([648 x double], ptr addrspace(3) @lds, i64 0, i64 384), i64 %idx1
+; CHECK-NEXT: store double 1.000000e+00, ptr addrspace(3) %gep1, align 8
 ; CHECK-NEXT: ret void
 define void @repeated_constexpr_gep_addrspacecast(i64 %idx0, i64 %idx1) {
-  %gep0 = getelementptr inbounds double, double* getelementptr ([648 x double], [648 x double]* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double]*), i64 0, i64 384), i64 %idx0
-  %asc0 = addrspacecast double* %gep0 to double addrspace(3)*
-  store double 1.0, double addrspace(3)* %asc0, align 8
+  %gep0 = getelementptr inbounds double, ptr getelementptr ([648 x double], ptr addrspacecast (ptr addrspace(3) @lds to ptr), i64 0, i64 384), i64 %idx0
+  %asc0 = addrspacecast ptr %gep0 to ptr addrspace(3)
+  store double 1.0, ptr addrspace(3) %asc0, align 8
 
-  %gep1 = getelementptr inbounds double, double* getelementptr ([648 x double], [648 x double]* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double]*), i64 0, i64 384), i64 %idx1
-  %asc1 = addrspacecast double* %gep1 to double addrspace(3)*
-  store double 1.0, double addrspace(3)* %asc1, align 8
+  %gep1 = getelementptr inbounds double, ptr getelementptr ([648 x double], ptr addrspacecast (ptr addrspace(3) @lds to ptr), i64 0, i64 384), i64 %idx1
+  %asc1 = addrspacecast ptr %gep1 to ptr addrspace(3)
+  store double 1.0, ptr addrspace(3) %asc1, align 8
 
   ret void
 }
 
 ; CHECK-LABEL: @unorder_constexpr_gep_bitcast(
-; CHECK-NEXT: %x0 = load i32, i32 addrspace(3)* bitcast ([648 x double] addrspace(3)* @lds to i32 addrspace(3)*), align 4
-; CHECK-NEXT: %x1 = load i32, i32 addrspace(3)* getelementptr (i32, i32 addrspace(3)* bitcast ([648 x double] addrspace(3)* @lds to i32 addrspace(3)*), i32 1), align 4
+; CHECK-NEXT: %x0 = load i32, ptr addrspace(3) @lds, align 4
+; CHECK-NEXT: %x1 = load i32, ptr addrspace(3) getelementptr inbounds (i32, ptr addrspace(3) @lds, i32 1), align 4
 define void @unorder_constexpr_gep_bitcast() {
-  %x0 = load i32, i32* bitcast ([648 x double]* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double]*) to i32*), align 4
-  %x1 = load i32, i32* getelementptr (i32, i32* bitcast ([648 x double]* addrspacecast ([648 x double] addrspace(3)* @lds to [648 x double]*) to i32*), i32 1), align 4
+  %x0 = load i32, ptr addrspacecast (ptr addrspace(3) @lds to ptr), align 4
+  %x1 = load i32, ptr getelementptr (i32, ptr addrspacecast (ptr addrspace(3) @lds to ptr), i32 1), align 4
   call void @use(i32 %x0, i32 %x1)
   ret void
 }

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
index 7baf922d8868b..cada078299e7b 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
@@ -3,65 +3,61 @@
 
 ; Addrspacecasts or bitcasts must be inserted after the instructions that define their uses.
 
-%struct.s0 = type { i32*, i32 }
+%struct.s0 = type { ptr, i32 }
 %struct.s1 = type { %struct.s0 }
 
 @global0 = protected addrspace(4) externally_initialized global %struct.s1 zeroinitializer
 
-declare i32 @func(i32* %arg)
+declare i32 @func(ptr %arg)
 
 define i32 @addrspacecast_insert_pos_assert() {
 ; CHECK-LABEL: @addrspacecast_insert_pos_assert(
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
-; CHECK-NEXT:    [[LOAD0:%.*]] = load i32*, i32* addrspace(4)* getelementptr inbounds ([[STRUCT_S1:%.*]], [[STRUCT_S1]] addrspace(4)* @global0, i32 0, i32 0, i32 0), align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast i32* [[LOAD0]] to i32 addrspace(1)*
-; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast i32 addrspace(1)* [[TMP1]] to i32*
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, i32 addrspace(5)* [[ALLOCA]], align 4
+; CHECK-NEXT:    [[LOAD0:%.*]] = load ptr, ptr addrspace(4) @global0, align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[LOAD0]] to ptr addrspace(1)
+; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr addrspace(1) [[TMP1]] to ptr
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, ptr addrspace(5) [[ALLOCA]], align 4
 ; CHECK-NEXT:    [[SEXT:%.*]] = sext i32 [[LOAD1]] to i64
-; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[SEXT]]
-; CHECK-NEXT:    [[CALL:%.*]] = call i32 @func(i32* [[GEP]])
+; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[SEXT]]
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @func(ptr [[GEP]])
 ; CHECK-NEXT:    ret i32 [[CALL]]
 ;
   %alloca = alloca i32, align 4, addrspace(5)
-  %cast = addrspacecast i32 addrspace(5)* %alloca to i32*
-  %load0 = load i32*, i32* addrspace(4)* getelementptr inbounds (%struct.s1, %struct.s1 addrspace(4)* @global0, i32 0, i32 0, i32 0)
-  %load1 = load i32, i32* %cast
+  %cast = addrspacecast ptr addrspace(5) %alloca to ptr
+  %load0 = load ptr, ptr addrspace(4) @global0
+  %load1 = load i32, ptr %cast
   %sext = sext i32 %load1 to i64
-  %gep = getelementptr inbounds i32, i32* %load0, i64 %sext
-  %call = call i32 @func(i32* %gep)
+  %gep = getelementptr inbounds i32, ptr %load0, i64 %sext
+  %call = call i32 @func(ptr %gep)
   ret i32 %call
 }
 
 define void @bitcast_insert_pos_assert_1() {
 ; CHECK-LABEL: @bitcast_insert_pos_assert_1(
 ; CHECK-NEXT:  bb.0:
-; CHECK-NEXT:    [[ASC0:%.*]] = bitcast [[STRUCT_S1:%.*]] addrspace(5)* undef to i8 addrspace(5)*
-; CHECK-NEXT:    [[BC0:%.*]] = bitcast i8 addrspace(5)* [[ASC0]] to [[STRUCT_S1]] addrspace(5)*
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast [[STRUCT_S1]] addrspace(5)* [[BC0]] to double* addrspace(5)*
-; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast double* addrspace(5)* [[TMP0]] to %struct.s1*
-; CHECK-NEXT:    [[PTI0:%.*]] = ptrtoint %struct.s1* [[TMP1]] to i64
+; CHECK-NEXT:    [[ASC0:%.*]] = addrspacecast ptr addrspace(5) undef to ptr
+; CHECK-NEXT:    [[PTI0:%.*]] = ptrtoint ptr [[ASC0]] to i64
 ; CHECK-NEXT:    br label [[BB_1:%.*]]
 ; CHECK:       bb.1:
 ; CHECK-NEXT:    br i1 undef, label [[BB_2:%.*]], label [[BB_3:%.*]]
 ; CHECK:       bb.2:
-; CHECK-NEXT:    [[LOAD0:%.*]] = load double*, double* addrspace(5)* [[TMP0]], align 8
+; CHECK-NEXT:    [[LOAD0:%.*]] = load ptr, ptr addrspace(5) undef, align 8
 ; CHECK-NEXT:    br label [[BB_3]]
 ; CHECK:       bb.3:
 ; CHECK-NEXT:    ret void
 ;
 bb.0:
-  %asc0 = addrspacecast %struct.s1 addrspace(5)* undef to i8*
-  %bc0 = bitcast i8* %asc0 to %struct.s1*
-  %pti0 = ptrtoint %struct.s1* %bc0 to i64
+  %asc0 = addrspacecast ptr addrspace(5) undef to ptr
+  %pti0 = ptrtoint ptr %asc0 to i64
   br label %bb.1
 
 bb.1:
   br i1 undef, label %bb.2, label %bb.3
 
 bb.2:
-  %pti1 = ptrtoint %struct.s1* %bc0 to i64
-  %itp0 = inttoptr i64 %pti1 to double**
-  %load0 = load double*, double** %itp0, align 8
+  %pti1 = ptrtoint ptr %asc0 to i64
+  %itp0 = inttoptr i64 %pti1 to ptr
+  %load0 = load ptr, ptr %itp0, align 8
   br label %bb.3
 
 bb.3:
@@ -71,23 +67,19 @@ bb.3:
 define void @bitcast_insert_pos_assert_2() {
 ; CHECK-LABEL: @bitcast_insert_pos_assert_2(
 ; CHECK-NEXT:    [[ALLOCA0:%.*]] = alloca [[STRUCT_S1:%.*]], align 16, addrspace(5)
-; CHECK-NEXT:    [[ASC0:%.*]] = bitcast [[STRUCT_S1]] addrspace(5)* [[ALLOCA0]] to i8 addrspace(5)*
-; CHECK-NEXT:    [[BC0:%.*]] = bitcast i8 addrspace(5)* [[ASC0]] to [[STRUCT_S1]] addrspace(5)*
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast [[STRUCT_S1]] addrspace(5)* [[BC0]] to i64 addrspace(5)*
-; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast i64 addrspace(5)* [[TMP1]] to %struct.s1*
-; CHECK-NEXT:    [[PTI0:%.*]] = ptrtoint %struct.s1* [[TMP2]] to i64
-; CHECK-NEXT:    [[ITP0:%.*]] = inttoptr i64 [[PTI0]] to i64*
-; CHECK-NEXT:    [[TMP3:%.*]] = addrspacecast i64 addrspace(5)* [[TMP1]] to i64*
-; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr i64, i64* [[TMP3]], i64 0
+; CHECK-NEXT:    [[ASC0:%.*]] = addrspacecast ptr addrspace(5) [[ALLOCA0]] to ptr
+; CHECK-NEXT:    [[PTI0:%.*]] = ptrtoint ptr [[ASC0]] to i64
+; CHECK-NEXT:    [[ITP0:%.*]] = inttoptr i64 [[PTI0]] to ptr
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr addrspace(5) [[ALLOCA0]] to ptr
+; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr i64, ptr [[TMP1]], i64 1
 ; CHECK-NEXT:    ret void
 ;
   %alloca0 = alloca %struct.s1, align 16, addrspace(5)
-  %asc0 = addrspacecast %struct.s1 addrspace(5)* %alloca0 to i8*
-  %bc0 = bitcast i8* %asc0 to %struct.s1*
-  %pti0 = ptrtoint %struct.s1* %bc0 to i64
-  %itp0 = inttoptr i64 %pti0 to i64*
-  %itp1 = ptrtoint %struct.s1* %bc0 to i64
-  %itp2 = inttoptr i64 %itp1 to i64*
-  %gep0 = getelementptr i64, i64* %itp2, i64 0
+  %asc0 = addrspacecast ptr addrspace(5) %alloca0 to ptr
+  %pti0 = ptrtoint ptr %asc0 to i64
+  %itp0 = inttoptr i64 %pti0 to ptr
+  %itp1 = ptrtoint ptr %asc0 to i64
+  %itp2 = inttoptr i64 %itp1 to ptr
+  %gep0 = getelementptr i64, ptr %itp2, i64 1
   ret void
 }

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/intrinsics.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/intrinsics.ll
index 9f574faddfdca..058d160e88612 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/intrinsics.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/intrinsics.ll
@@ -1,136 +1,136 @@
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
 
 ; CHECK-LABEL: @objectsize_group_to_flat_i32(
-; CHECK: %val = call i32 @llvm.objectsize.i32.p3i8(i8 addrspace(3)* %group.ptr, i1 true, i1 false, i1 false)
-define i32 @objectsize_group_to_flat_i32(i8 addrspace(3)* %group.ptr) #0 {
-  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*
-  %val = call i32 @llvm.objectsize.i32.p0i8(i8* %cast, i1 true, i1 false, i1 false)
+; CHECK: %val = call i32 @llvm.objectsize.i32.p3(ptr addrspace(3) %group.ptr, i1 true, i1 false, i1 false)
+define i32 @objectsize_group_to_flat_i32(ptr addrspace(3) %group.ptr) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %val = call i32 @llvm.objectsize.i32.p0(ptr %cast, i1 true, i1 false, i1 false)
   ret i32 %val
 }
 
 ; CHECK-LABEL: @objectsize_global_to_flat_i64(
-; CHECK: %val = call i64 @llvm.objectsize.i64.p3i8(i8 addrspace(3)* %global.ptr, i1 true, i1 false, i1 false)
-define i64 @objectsize_global_to_flat_i64(i8 addrspace(3)* %global.ptr) #0 {
-  %cast = addrspacecast i8 addrspace(3)* %global.ptr to i8*
-  %val = call i64 @llvm.objectsize.i64.p0i8(i8* %cast, i1 true, i1 false, i1 false)
+; CHECK: %val = call i64 @llvm.objectsize.i64.p3(ptr addrspace(3) %global.ptr, i1 true, i1 false, i1 false)
+define i64 @objectsize_global_to_flat_i64(ptr addrspace(3) %global.ptr) #0 {
+  %cast = addrspacecast ptr addrspace(3) %global.ptr to ptr
+  %val = call i64 @llvm.objectsize.i64.p0(ptr %cast, i1 true, i1 false, i1 false)
   ret i64 %val
 }
 
 ; CHECK-LABEL: @atomicinc_global_to_flat_i32(
-; CHECK: call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %global.ptr, i32 %y, i32 0, i32 0, i1 false)
-define i32 @atomicinc_global_to_flat_i32(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
-  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32*
-  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* %cast, i32 %y, i32 0, i32 0, i1 false)
+; CHECK: call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %global.ptr, i32 %y, i32 0, i32 0, i1 false)
+define i32 @atomicinc_global_to_flat_i32(ptr addrspace(1) %global.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %cast, i32 %y, i32 0, i32 0, i1 false)
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @atomicinc_group_to_flat_i32(
-; CHECK: %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %group.ptr, i32 %y, i32 0, i32 0, i1 false)
-define i32 @atomicinc_group_to_flat_i32(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
-  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32*
-  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* %cast, i32 %y, i32 0, i32 0, i1 false)
+; CHECK: %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %group.ptr, i32 %y, i32 0, i32 0, i1 false)
+define i32 @atomicinc_group_to_flat_i32(ptr addrspace(3) %group.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr %cast, i32 %y, i32 0, i32 0, i1 false)
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @atomicinc_global_to_flat_i64(
-; CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %global.ptr, i64 %y, i32 0, i32 0, i1 false)
-define i64 @atomicinc_global_to_flat_i64(i64 addrspace(1)* %global.ptr, i64 %y) #0 {
-  %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64*
-  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 false)
+; CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) %global.ptr, i64 %y, i32 0, i32 0, i1 false)
+define i64 @atomicinc_global_to_flat_i64(ptr addrspace(1) %global.ptr, i64 %y) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 false)
   ret i64 %ret
 }
 
 ; CHECK-LABEL: @atomicinc_group_to_flat_i64(
-; CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %group.ptr, i64 %y, i32 0, i32 0, i1 false)
-define i64 @atomicinc_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
-  %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64*
-  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 false)
+; CHECK: call i64 @llvm.amdgcn.atomic.inc.i64.p3(ptr addrspace(3) %group.ptr, i64 %y, i32 0, i32 0, i1 false)
+define i64 @atomicinc_group_to_flat_i64(ptr addrspace(3) %group.ptr, i64 %y) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 false)
   ret i64 %ret
 }
 
 ; CHECK-LABEL: @atomicdec_global_to_flat_i32(
-; CHECK: call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %global.ptr, i32 %val, i32 0, i32 0, i1 false)
-define i32 @atomicdec_global_to_flat_i32(i32 addrspace(1)* %global.ptr, i32 %val) #0 {
-  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32*
-  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %cast, i32 %val, i32 0, i32 0, i1 false)
+; CHECK: call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %global.ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @atomicdec_global_to_flat_i32(ptr addrspace(1) %global.ptr, i32 %val) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %cast, i32 %val, i32 0, i32 0, i1 false)
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @atomicdec_group_to_flat_i32(
-; CHECK: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %group.ptr, i32 %val, i32 0, i32 0, i1 false)
-define i32 @atomicdec_group_to_flat_i32(i32 addrspace(3)* %group.ptr, i32 %val) #0 {
-  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32*
-  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %cast, i32 %val, i32 0, i32 0, i1 false)
+; CHECK: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %group.ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @atomicdec_group_to_flat_i32(ptr addrspace(3) %group.ptr, i32 %val) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %cast, i32 %val, i32 0, i32 0, i1 false)
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @atomicdec_global_to_flat_i64(
-; CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %global.ptr, i64 %y, i32 0, i32 0, i1 false)
-define i64 @atomicdec_global_to_flat_i64(i64 addrspace(1)* %global.ptr, i64 %y) #0 {
-  %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64*
-  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 false)
+; CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) %global.ptr, i64 %y, i32 0, i32 0, i1 false)
+define i64 @atomicdec_global_to_flat_i64(ptr addrspace(1) %global.ptr, i64 %y) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 false)
   ret i64 %ret
 }
 
 ; CHECK-LABEL: @atomicdec_group_to_flat_i64(
-; CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p3i64(i64 addrspace(3)* %group.ptr, i64 %y, i32 0, i32 0, i1 false
-define i64 @atomicdec_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
-  %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64*
-  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 false)
+; CHECK: call i64 @llvm.amdgcn.atomic.dec.i64.p3(ptr addrspace(3) %group.ptr, i64 %y, i32 0, i32 0, i1 false
+define i64 @atomicdec_group_to_flat_i64(ptr addrspace(3) %group.ptr, i64 %y) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 false)
   ret i64 %ret
 }
 
 ; CHECK-LABEL: @volatile_atomicinc_group_to_flat_i64(
-; CHECK-NEXT: %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64*
-; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 true)
-define i64 @volatile_atomicinc_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
-  %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64*
-  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 true)
+; CHECK-NEXT: %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 true)
+define i64 @volatile_atomicinc_group_to_flat_i64(ptr addrspace(3) %group.ptr, i64 %y) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 true)
   ret i64 %ret
 }
 
 ; CHECK-LABEL: @volatile_atomicdec_global_to_flat_i32(
-; CHECK-NEXT: %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32*
-; CHECK-NEXT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %cast, i32 %val, i32 0, i32 0, i1 true)
-define i32 @volatile_atomicdec_global_to_flat_i32(i32 addrspace(1)* %global.ptr, i32 %val) #0 {
-  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32*
-  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %cast, i32 %val, i32 0, i32 0, i1 true)
+; CHECK-NEXT: %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+; CHECK-NEXT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %cast, i32 %val, i32 0, i32 0, i1 true)
+define i32 @volatile_atomicdec_global_to_flat_i32(ptr addrspace(1) %global.ptr, i32 %val) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %cast, i32 %val, i32 0, i32 0, i1 true)
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @volatile_atomicdec_group_to_flat_i32(
-; CHECK-NEXT: %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32*
-; CHECK-NEXT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %cast, i32 %val, i32 0, i32 0, i1 true)
-define i32 @volatile_atomicdec_group_to_flat_i32(i32 addrspace(3)* %group.ptr, i32 %val) #0 {
-  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32*
-  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* %cast, i32 %val, i32 0, i32 0, i1 true)
+; CHECK-NEXT: %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+; CHECK-NEXT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %cast, i32 %val, i32 0, i32 0, i1 true)
+define i32 @volatile_atomicdec_group_to_flat_i32(ptr addrspace(3) %group.ptr, i32 %val) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr %cast, i32 %val, i32 0, i32 0, i1 true)
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @volatile_atomicdec_global_to_flat_i64(
-; CHECK-NEXT: %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64*
-; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 true)
-define i64 @volatile_atomicdec_global_to_flat_i64(i64 addrspace(1)* %global.ptr, i64 %y) #0 {
-  %cast = addrspacecast i64 addrspace(1)* %global.ptr to i64*
-  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 true)
+; CHECK-NEXT: %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 true)
+define i64 @volatile_atomicdec_global_to_flat_i64(ptr addrspace(1) %global.ptr, i64 %y) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 true)
   ret i64 %ret
 }
 
 ; CHECK-LABEL: @volatile_atomicdec_group_to_flat_i64(
-; CHECK-NEXT: %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64*
-; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 true)
-define i64 @volatile_atomicdec_group_to_flat_i64(i64 addrspace(3)* %group.ptr, i64 %y) #0 {
-  %cast = addrspacecast i64 addrspace(3)* %group.ptr to i64*
-  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* %cast, i64 %y, i32 0, i32 0, i1 true)
+; CHECK-NEXT: %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+; CHECK-NEXT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 true)
+define i64 @volatile_atomicdec_group_to_flat_i64(ptr addrspace(3) %group.ptr, i64 %y) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr %cast, i64 %y, i32 0, i32 0, i1 true)
   ret i64 %ret
 }
 
-declare i32 @llvm.objectsize.i32.p0i8(i8*, i1, i1, i1) #1
-declare i64 @llvm.objectsize.i64.p0i8(i8*, i1, i1, i1) #1
-declare i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* nocapture, i32, i32, i32, i1) #2
-declare i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* nocapture, i64, i32, i32, i1) #2
-declare i32 @llvm.amdgcn.atomic.dec.i32.p0i32(i32* nocapture, i32, i32, i32, i1) #2
-declare i64 @llvm.amdgcn.atomic.dec.i64.p0i64(i64* nocapture, i64, i32, i32, i1) #2
+declare i32 @llvm.objectsize.i32.p0(ptr, i1, i1, i1) #1
+declare i64 @llvm.objectsize.i64.p0(ptr, i1, i1, i1) #1
+declare i32 @llvm.amdgcn.atomic.inc.i32.p0(ptr nocapture, i32, i32, i32, i1) #2
+declare i64 @llvm.amdgcn.atomic.inc.i64.p0(ptr nocapture, i64, i32, i32, i1) #2
+declare i32 @llvm.amdgcn.atomic.dec.i32.p0(ptr nocapture, i32, i32, i32, i1) #2
+declare i64 @llvm.amdgcn.atomic.dec.i64.p0(ptr nocapture, i64, i32, i32, i1) #2
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/issue53665.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/issue53665.ll
index fcc1f56affbb6..d372750df7d31 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/issue53665.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/issue53665.ll
@@ -2,53 +2,51 @@
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces -o - %s | FileCheck %s
 ; https://github.com/llvm/llvm-project/issues/53665
 
-define i32 @addrspacecast_ptrtoint_inttoptr(i8 addrspace(1)* %arg) {
+define i32 @addrspacecast_ptrtoint_inttoptr(ptr addrspace(1) %arg) {
 ; CHECK-LABEL: @addrspacecast_ptrtoint_inttoptr(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8 addrspace(1)* [[ARG:%.*]] to i32 addrspace(1)*
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32 addrspace(1)* [[TMP0]], align 4
+; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr addrspace(1) [[ARG:%.*]], align 4
 ; CHECK-NEXT:    ret i32 [[LOAD]]
 ;
 bb:
-  %asc = addrspacecast i8 addrspace(1)* %arg to i8*
-  %p2i = ptrtoint i8* %asc to i64
-  %i2p = inttoptr i64 %p2i to i32*
-  %load = load i32, i32* %i2p
+  %asc = addrspacecast ptr addrspace(1) %arg to ptr
+  %p2i = ptrtoint ptr %asc to i64
+  %i2p = inttoptr i64 %p2i to ptr
+  %load = load i32, ptr %i2p
   ret i32 %load
 }
 
-define i32 @assumed_ptrtoint_inttoptr(i8* %arg) {
+define i32 @assumed_ptrtoint_inttoptr(ptr %arg) {
 bb:
-  %is.priv = call i1 @llvm.amdgcn.is.private(i8* %arg)
+  %is.priv = call i1 @llvm.amdgcn.is.private(ptr %arg)
   %not.is.priv = xor i1 %is.priv, -1
-  %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %arg)
+  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %arg)
   %not.is.shared = xor i1 %is.shared, -1
   %and = and i1 %not.is.priv, %not.is.shared
   tail call void @llvm.assume(i1 %and)
-  %p2i = ptrtoint i8* %arg to i64
-  %i2p = inttoptr i64 %p2i to i32*
-  %load = load i32, i32* %i2p
+  %p2i = ptrtoint ptr %arg to i64
+  %i2p = inttoptr i64 %p2i to ptr
+  %load = load i32, ptr %i2p
   ret i32 %load
 }
 
-define i32 @addrspacecast_ptrtoint_inttptr_nontrivial(i8 addrspace(3)* %arg) {
+define i32 @addrspacecast_ptrtoint_inttptr_nontrivial(ptr addrspace(3) %arg) {
 ; CHECK-LABEL: @addrspacecast_ptrtoint_inttptr_nontrivial(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8 addrspace(3)* [[ARG:%.*]] to i32 addrspace(3)*
-; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32 addrspace(3)* [[TMP0]], align 4
+; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr addrspace(3) [[ARG:%.*]], align 4
 ; CHECK-NEXT:    ret i32 [[LOAD]]
 ;
 bb:
-  %asc = addrspacecast i8 addrspace(3)* %arg to i8*
-  %p2i = ptrtoint i8* %asc to i64
-  %i2p = inttoptr i64 %p2i to i32*
-  %load = load i32, i32* %i2p
+  %asc = addrspacecast ptr addrspace(3) %arg to ptr
+  %p2i = ptrtoint ptr %asc to i64
+  %i2p = inttoptr i64 %p2i to ptr
+  %load = load i32, ptr %i2p
   ret i32 %load
 }
 
 declare void @llvm.assume(i1 noundef) #0
-declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #1
-declare i1 @llvm.amdgcn.is.private(i8* nocapture) #1
+declare i1 @llvm.amdgcn.is.shared(ptr nocapture) #1
+declare i1 @llvm.amdgcn.is.private(ptr nocapture) #1
 
 attributes #0 = { inaccessiblememonly nofree nosync nounwind willreturn }
 attributes #1 = { nounwind readnone speculatable willreturn }

diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll
index 550bf7352e82f..c7bf2be827a19 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/mem-intrinsics.ll
@@ -1,134 +1,134 @@
 ; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
 
 ; CHECK-LABEL: @memset_group_to_flat(
-; CHECK: call void @llvm.memset.p3i8.i64(i8 addrspace(3)* align 4 %group.ptr, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 {
-  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memset.p3.i64(ptr addrspace(3) align 4 %group.ptr, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memset_group_to_flat(ptr addrspace(3) %group.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  call void @llvm.memset.p0.i64(ptr align 4 %cast, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 
 ; CHECK-LABEL: @memset_global_to_flat(
-; CHECK: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 %global.ptr, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memset_global_to_flat(i8 addrspace(1)* %global.ptr, i32 %y) #0 {
-  %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memset.p1.i64(ptr addrspace(1) align 4 %global.ptr, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memset_global_to_flat(ptr addrspace(1) %global.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  call void @llvm.memset.p0.i64(ptr align 4 %cast, i8 4, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 
 ; CHECK-LABEL: @memset_group_to_flat_no_md(
-; CHECK: call void @llvm.memset.p3i8.i64(i8 addrspace(3)* align 4 %group.ptr, i8 4, i64 %size, i1 false){{$}}
-define amdgpu_kernel void @memset_group_to_flat_no_md(i8 addrspace(3)* %group.ptr, i64 %size) #0 {
-  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 %size, i1 false)
+; CHECK: call void @llvm.memset.p3.i64(ptr addrspace(3) align 4 %group.ptr, i8 4, i64 %size, i1 false){{$}}
+define amdgpu_kernel void @memset_group_to_flat_no_md(ptr addrspace(3) %group.ptr, i64 %size) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  call void @llvm.memset.p0.i64(ptr align 4 %cast, i8 4, i64 %size, i1 false)
   ret void
 }
 
 ; CHECK-LABEL: @memset_global_to_flat_no_md(
-; CHECK: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 %global.ptr, i8 4, i64 %size, i1 false){{$}}
-define amdgpu_kernel void @memset_global_to_flat_no_md(i8 addrspace(1)* %global.ptr, i64 %size) #0 {
-  %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 %size, i1 false)
+; CHECK: call void @llvm.memset.p1.i64(ptr addrspace(1) align 4 %global.ptr, i8 4, i64 %size, i1 false){{$}}
+define amdgpu_kernel void @memset_global_to_flat_no_md(ptr addrspace(1) %global.ptr, i64 %size) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  call void @llvm.memset.p0.i64(ptr align 4 %cast, i8 4, i64 %size, i1 false)
   ret void
 }
 
 ; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group(
-; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group(i8* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
-  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memcpy.p0.p3.i64(ptr align 4 %dest, ptr addrspace(3) align 4 %src.group.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group(ptr %dest, ptr addrspace(3) %src.group.ptr, i64 %size) #0 {
+  %cast.src = addrspacecast ptr addrspace(3) %src.group.ptr to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 
 ; CHECK-LABEL: @memcpy_inline_flat_to_flat_replace_src_with_group(
-; CHECK: call void @llvm.memcpy.inline.p0i8.p3i8.i64(i8* align 4 %dest, i8 addrspace(3)* align 4 %src.group.ptr, i64 42, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memcpy_inline_flat_to_flat_replace_src_with_group(i8* %dest, i8 addrspace(3)* %src.group.ptr) #0 {
-  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
-  call void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* align 4 %dest, i8* align 4 %cast.src, i64 42, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memcpy.inline.p0.p3.i64(ptr align 4 %dest, ptr addrspace(3) align 4 %src.group.ptr, i64 42, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memcpy_inline_flat_to_flat_replace_src_with_group(ptr %dest, ptr addrspace(3) %src.group.ptr) #0 {
+  %cast.src = addrspacecast ptr addrspace(3) %src.group.ptr to ptr
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr align 4 %dest, ptr align 4 %cast.src, i64 42, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 
 ; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_with_group(
-; CHECK: call void @llvm.memcpy.p3i8.p0i8.i64(i8 addrspace(3)* align 4 %dest.group.ptr, i8* align 4 %src.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_with_group(i8 addrspace(3)* %dest.group.ptr, i8* %src.ptr, i64 %size) #0 {
-  %cast.dest = addrspacecast i8 addrspace(3)* %dest.group.ptr to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %cast.dest, i8* align 4 %src.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memcpy.p3.p0.i64(ptr addrspace(3) align 4 %dest.group.ptr, ptr align 4 %src.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_with_group(ptr addrspace(3) %dest.group.ptr, ptr %src.ptr, i64 %size) #0 {
+  %cast.dest = addrspacecast ptr addrspace(3) %dest.group.ptr to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %cast.dest, ptr align 4 %src.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 
 ; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_src_with_group(
-; CHECK: call void @llvm.memcpy.p3i8.p3i8.i64(i8 addrspace(3)* align 4 %src.group.ptr, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_src_with_group(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
-  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
-  %cast.dest = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %cast.dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) align 4 %src.group.ptr, ptr addrspace(3) align 4 %src.group.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_src_with_group(ptr addrspace(3) %dest.group.ptr, ptr addrspace(3) %src.group.ptr, i64 %size) #0 {
+  %cast.src = addrspacecast ptr addrspace(3) %src.group.ptr to ptr
+  %cast.dest = addrspacecast ptr addrspace(3) %src.group.ptr to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %cast.dest, ptr align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 
 ; CHECK-LABEL: @memcpy_flat_to_flat_replace_dest_group_src_global(
-; CHECK: call void @llvm.memcpy.p3i8.p1i8.i64(i8 addrspace(3)* align 4 %dest.group.ptr, i8 addrspace(1)* align 4 %src.global.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_group_src_global(i8 addrspace(3)* %dest.group.ptr, i8 addrspace(1)* %src.global.ptr, i64 %size) #0 {
-  %cast.src = addrspacecast i8 addrspace(1)* %src.global.ptr to i8*
-  %cast.dest = addrspacecast i8 addrspace(3)* %dest.group.ptr to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %cast.dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memcpy.p3.p1.i64(ptr addrspace(3) align 4 %dest.group.ptr, ptr addrspace(1) align 4 %src.global.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_dest_group_src_global(ptr addrspace(3) %dest.group.ptr, ptr addrspace(1) %src.global.ptr, i64 %size) #0 {
+  %cast.src = addrspacecast ptr addrspace(1) %src.global.ptr to ptr
+  %cast.dest = addrspacecast ptr addrspace(3) %dest.group.ptr to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %cast.dest, ptr align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 
 ; CHECK-LABEL: @memcpy_group_to_flat_replace_dest_global(
-; CHECK: call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* align 4 %dest.global.ptr, i8 addrspace(3)* align 4 %src.group.ptr, i32 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memcpy_group_to_flat_replace_dest_global(i8 addrspace(1)* %dest.global.ptr, i8 addrspace(3)* %src.group.ptr, i32 %size) #0 {
-  %cast.dest = addrspacecast i8 addrspace(1)* %dest.global.ptr to i8*
-  call void @llvm.memcpy.p0i8.p3i8.i32(i8* align 4 %cast.dest, i8 addrspace(3)* align 4 %src.group.ptr, i32 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memcpy.p1.p3.i32(ptr addrspace(1) align 4 %dest.global.ptr, ptr addrspace(3) align 4 %src.group.ptr, i32 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memcpy_group_to_flat_replace_dest_global(ptr addrspace(1) %dest.global.ptr, ptr addrspace(3) %src.group.ptr, i32 %size) #0 {
+  %cast.dest = addrspacecast ptr addrspace(1) %dest.global.ptr to ptr
+  call void @llvm.memcpy.p0.p3.i32(ptr align 4 %cast.dest, ptr addrspace(3) align 4 %src.group.ptr, i32 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 
 ; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group_tbaa_struct(
-; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false), !tbaa.struct !8
-define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_tbaa_struct(i8* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
-  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa.struct !8
+; CHECK: call void @llvm.memcpy.p0.p3.i64(ptr align 4 %dest, ptr addrspace(3) align 4 %src.group.ptr, i64 %size, i1 false), !tbaa.struct !8
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_tbaa_struct(ptr %dest, ptr addrspace(3) %src.group.ptr, i64 %size) #0 {
+  %cast.src = addrspacecast ptr addrspace(3) %src.group.ptr to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %cast.src, i64 %size, i1 false), !tbaa.struct !8
   ret void
 }
 
 ; CHECK-LABEL: @memcpy_flat_to_flat_replace_src_with_group_no_md(
-; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false){{$}}
-define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_no_md(i8* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
-  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest, i8* align 4 %cast.src, i64 %size, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p3.i64(ptr align 4 %dest, ptr addrspace(3) align 4 %src.group.ptr, i64 %size, i1 false){{$}}
+define amdgpu_kernel void @memcpy_flat_to_flat_replace_src_with_group_no_md(ptr %dest, ptr addrspace(3) %src.group.ptr, i64 %size) #0 {
+  %cast.src = addrspacecast ptr addrspace(3) %src.group.ptr to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest, ptr align 4 %cast.src, i64 %size, i1 false)
   ret void
 }
 
 ; CHECK-LABEL: @multiple_memcpy_flat_to_flat_replace_src_with_group_no_md(
-; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest0, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false){{$}}
-; CHECK: call void @llvm.memcpy.p0i8.p3i8.i64(i8* align 4 %dest1, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false){{$}}
-define amdgpu_kernel void @multiple_memcpy_flat_to_flat_replace_src_with_group_no_md(i8* %dest0, i8* %dest1, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
-  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest0, i8* align 4 %cast.src, i64 %size, i1 false)
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %dest1, i8* align 4 %cast.src, i64 %size, i1 false)
+; CHECK: call void @llvm.memcpy.p0.p3.i64(ptr align 4 %dest0, ptr addrspace(3) align 4 %src.group.ptr, i64 %size, i1 false){{$}}
+; CHECK: call void @llvm.memcpy.p0.p3.i64(ptr align 4 %dest1, ptr addrspace(3) align 4 %src.group.ptr, i64 %size, i1 false){{$}}
+define amdgpu_kernel void @multiple_memcpy_flat_to_flat_replace_src_with_group_no_md(ptr %dest0, ptr %dest1, ptr addrspace(3) %src.group.ptr, i64 %size) #0 {
+  %cast.src = addrspacecast ptr addrspace(3) %src.group.ptr to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest0, ptr align 4 %cast.src, i64 %size, i1 false)
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dest1, ptr align 4 %cast.src, i64 %size, i1 false)
   ret void
 }
 
 ; Check for iterator problems if the pointer has 2 uses in the same call
 ; CHECK-LABEL: @memcpy_group_flat_to_flat_self(
-; CHECK: call void @llvm.memcpy.p3i8.p3i8.i64(i8 addrspace(3)* align 4 %group.ptr, i8 addrspace(3)* align 4 %group.ptr, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memcpy_group_flat_to_flat_self(i8 addrspace(3)* %group.ptr) #0 {
-  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*
-  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %cast, i8* align 4 %cast, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) align 4 %group.ptr, ptr addrspace(3) align 4 %group.ptr, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memcpy_group_flat_to_flat_self(ptr addrspace(3) %group.ptr) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %cast, ptr align 4 %cast, i64 32, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 ; CHECK-LABEL: @memmove_flat_to_flat_replace_src_with_group(
-; CHECK: call void @llvm.memmove.p0i8.p3i8.i64(i8* align 4 %dest, i8 addrspace(3)* align 4 %src.group.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
-define amdgpu_kernel void @memmove_flat_to_flat_replace_src_with_group(i8* %dest, i8 addrspace(3)* %src.group.ptr, i64 %size) #0 {
-  %cast.src = addrspacecast i8 addrspace(3)* %src.group.ptr to i8*
-  call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 %dest, i8* align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+; CHECK: call void @llvm.memmove.p0.p3.i64(ptr align 4 %dest, ptr addrspace(3) align 4 %src.group.ptr, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
+define amdgpu_kernel void @memmove_flat_to_flat_replace_src_with_group(ptr %dest, ptr addrspace(3) %src.group.ptr, i64 %size) #0 {
+  %cast.src = addrspacecast ptr addrspace(3) %src.group.ptr to ptr
+  call void @llvm.memmove.p0.p0.i64(ptr align 4 %dest, ptr align 4 %cast.src, i64 %size, i1 false), !tbaa !0, !alias.scope !3, !noalias !6
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
-declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
-declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
-declare void @llvm.memcpy.p0i8.p3i8.i32(i8* nocapture writeonly, i8 addrspace(3)* nocapture readonly, i32, i1) #1
-declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
+declare void @llvm.memcpy.p0.p3.i32(ptr nocapture writeonly, ptr addrspace(3) nocapture readonly, i32, i1) #1
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/no-flat-addrspace.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/no-flat-addrspace.ll
index 8e04d3cf13b77..c8c28dfd15832 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/no-flat-addrspace.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/no-flat-addrspace.ll
@@ -4,10 +4,10 @@
 ; flat instructions. It's still flat, it just doesn't work.
 
 ; CHECK-LABEL: @load_flat_from_global(
-; CHECK-NEXT: %tmp1 = load float, float addrspace(1)* %ptr
+; CHECK-NEXT: %tmp1 = load float, ptr addrspace(1) %ptr
 ; CHECK-NEXT: ret float %tmp1
-define float @load_flat_from_global(float addrspace(1)*%ptr) #0 {
-  %tmp0 = addrspacecast float addrspace(1)* %ptr to float*
-  %tmp1 = load float, float* %tmp0
+define float @load_flat_from_global(ptr addrspace(1) %ptr) #0 {
+  %tmp0 = addrspacecast ptr addrspace(1) %ptr to ptr
+  %tmp1 = load float, ptr %tmp0
   ret float %tmp1
 }

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/noop-ptrint-pair.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/noop-ptrint-pair.ll
index d45b0ab8803f7..66c4fc8d44d3e 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/noop-ptrint-pair.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/noop-ptrint-pair.ll
@@ -4,48 +4,48 @@
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 
 ; COMMON-LABEL: @noop_ptrint_pair(
-; AMDGCN-NEXT: store i32 0, i32 addrspace(1)* %{{.*}}
+; AMDGCN-NEXT: store i32 0, ptr addrspace(1) %{{.*}}
 ; AMDGCN-NEXT: ret void
-; NOTTI-NEXT: %1 = ptrtoint i32 addrspace(1)* %x.coerce to i64
-; NOTTI-NEXT: %2 = inttoptr i64 %1 to i32*
-; NOTTI-NEXT: store i32 0, i32* %2
+; NOTTI-NEXT: %1 = ptrtoint ptr addrspace(1) %x.coerce to i64
+; NOTTI-NEXT: %2 = inttoptr i64 %1 to ptr
+; NOTTI-NEXT: store i32 0, ptr %2
 ; NOTTI-NEXT: ret void
-define void @noop_ptrint_pair(i32 addrspace(1)* %x.coerce) {
-  %1 = ptrtoint i32 addrspace(1)* %x.coerce to i64
-  %2 = inttoptr i64 %1 to i32*
-  store i32 0, i32* %2
+define void @noop_ptrint_pair(ptr addrspace(1) %x.coerce) {
+  %1 = ptrtoint ptr addrspace(1) %x.coerce to i64
+  %2 = inttoptr i64 %1 to ptr
+  store i32 0, ptr %2
   ret void
 }
 
 ; COMMON-LABEL: @non_noop_ptrint_pair(
-; AMDGCN-NEXT: ptrtoint i32 addrspace(3)* %{{.*}} to i64
-; AMDGCN-NEXT: inttoptr i64 %{{.*}} to i32*
-; AMDGCN-NEXT: store i32 0, i32* %{{.*}}
+; AMDGCN-NEXT: ptrtoint ptr addrspace(3) %{{.*}} to i64
+; AMDGCN-NEXT: inttoptr i64 %{{.*}} to ptr
+; AMDGCN-NEXT: store i32 0, ptr %{{.*}}
 ; AMDGCN-NEXT: ret void
-; NOTTI-NEXT: ptrtoint i32 addrspace(3)* %{{.*}} to i64
-; NOTTI-NEXT: inttoptr i64 %{{.*}} to i32*
-; NOTTI-NEXT: store i32 0, i32* %{{.*}}
+; NOTTI-NEXT: ptrtoint ptr addrspace(3) %{{.*}} to i64
+; NOTTI-NEXT: inttoptr i64 %{{.*}} to ptr
+; NOTTI-NEXT: store i32 0, ptr %{{.*}}
 ; NOTTI-NEXT: ret void
-define void @non_noop_ptrint_pair(i32 addrspace(3)* %x.coerce) {
-  %1 = ptrtoint i32 addrspace(3)* %x.coerce to i64
-  %2 = inttoptr i64 %1 to i32*
-  store i32 0, i32* %2
+define void @non_noop_ptrint_pair(ptr addrspace(3) %x.coerce) {
+  %1 = ptrtoint ptr addrspace(3) %x.coerce to i64
+  %2 = inttoptr i64 %1 to ptr
+  store i32 0, ptr %2
   ret void
 }
 
 ; COMMON-LABEL: @non_noop_ptrint_pair2(
-; AMDGCN-NEXT: ptrtoint i32 addrspace(1)* %{{.*}} to i32
-; AMDGCN-NEXT: inttoptr i32 %{{.*}} to i32*
-; AMDGCN-NEXT: store i32 0, i32* %{{.*}}
+; AMDGCN-NEXT: ptrtoint ptr addrspace(1) %{{.*}} to i32
+; AMDGCN-NEXT: inttoptr i32 %{{.*}} to ptr
+; AMDGCN-NEXT: store i32 0, ptr %{{.*}}
 ; AMDGCN-NEXT: ret void
-; NOTTI-NEXT: ptrtoint i32 addrspace(1)* %{{.*}} to i32
-; NOTTI-NEXT: inttoptr i32 %{{.*}} to i32*
-; NOTTI-NEXT: store i32 0, i32* %{{.*}}
+; NOTTI-NEXT: ptrtoint ptr addrspace(1) %{{.*}} to i32
+; NOTTI-NEXT: inttoptr i32 %{{.*}} to ptr
+; NOTTI-NEXT: store i32 0, ptr %{{.*}}
 ; NOTTI-NEXT: ret void
-define void @non_noop_ptrint_pair2(i32 addrspace(1)* %x.coerce) {
-  %1 = ptrtoint i32 addrspace(1)* %x.coerce to i32
-  %2 = inttoptr i32 %1 to i32*
-  store i32 0, i32* %2
+define void @non_noop_ptrint_pair2(ptr addrspace(1) %x.coerce) {
+  %1 = ptrtoint ptr addrspace(1) %x.coerce to i32
+  %2 = inttoptr i32 %1 to ptr
+  store i32 0, ptr %2
   ret void
 }
 
@@ -53,59 +53,59 @@ define void @non_noop_ptrint_pair2(i32 addrspace(1)* %x.coerce) {
 @l = addrspace(3) global i32 0, align 4
 
 ; COMMON-LABEL: @noop_ptrint_pair_ce(
-; AMDGCN-NEXT: store i32 0, i32 addrspace(1)* @g
+; AMDGCN-NEXT: store i32 0, ptr addrspace(1) @g
 ; AMDGCN-NEXT: ret void
-; NOTTI-NEXT: store i32 0, i32* inttoptr (i64 ptrtoint (i32 addrspace(1)* @g to i64) to i32*)
+; NOTTI-NEXT: store i32 0, ptr inttoptr (i64 ptrtoint (ptr addrspace(1) @g to i64) to ptr)
 ; NOTTI-NEXT: ret void
 define void @noop_ptrint_pair_ce() {
-  store i32 0, i32* inttoptr (i64 ptrtoint (i32 addrspace(1)* @g to i64) to i32*)
+  store i32 0, ptr inttoptr (i64 ptrtoint (ptr addrspace(1) @g to i64) to ptr)
   ret void
 }
 
 ; COMMON-LABEL: @noop_ptrint_pair_ce2(
-; AMDGCN-NEXT: ret i32* addrspacecast (i32 addrspace(1)* @g to i32*)
-; NOTTI-NEXT: ret i32* inttoptr (i64 ptrtoint (i32 addrspace(1)* @g to i64) to i32*)
-define i32* @noop_ptrint_pair_ce2() {
-  ret i32* inttoptr (i64 ptrtoint (i32 addrspace(1)* @g to i64) to i32*)
+; AMDGCN-NEXT: ret ptr addrspacecast (ptr addrspace(1) @g to ptr)
+; NOTTI-NEXT: ret ptr inttoptr (i64 ptrtoint (ptr addrspace(1) @g to i64) to ptr)
+define ptr @noop_ptrint_pair_ce2() {
+  ret ptr inttoptr (i64 ptrtoint (ptr addrspace(1) @g to i64) to ptr)
 }
 
 ; COMMON-LABEL: @noop_ptrint_pair_ce3(
-; AMDGCN-NEXT: %i = inttoptr i64 ptrtoint (i32 addrspace(1)* @g to i64) to i32*
+; AMDGCN-NEXT: %i = inttoptr i64 ptrtoint (ptr addrspace(1) @g to i64) to ptr
 ; AMDGCN-NEXT: ret void
-; NOTTI-NEXT: %i = inttoptr i64 ptrtoint (i32 addrspace(1)* @g to i64) to i32*
+; NOTTI-NEXT: %i = inttoptr i64 ptrtoint (ptr addrspace(1) @g to i64) to ptr
 ; NOTTI-NEXT: ret void
 define void @noop_ptrint_pair_ce3() {
-  %i = inttoptr i64 ptrtoint (i32 addrspace(1)* @g to i64) to i32*
+  %i = inttoptr i64 ptrtoint (ptr addrspace(1) @g to i64) to ptr
   ret void
 }
 
 ; COMMON-LABEL: @non_noop_ptrint_pair_ce(
-; AMDGCN-NEXT: store i32 0, i32* inttoptr (i64 ptrtoint (i32 addrspace(3)* @l to i64) to i32*)
+; AMDGCN-NEXT: store i32 0, ptr inttoptr (i64 ptrtoint (ptr addrspace(3) @l to i64) to ptr)
 ; AMDGCN-NEXT: ret void
-; NOTTI-NEXT: store i32 0, i32* inttoptr (i64 ptrtoint (i32 addrspace(3)* @l to i64) to i32*)
+; NOTTI-NEXT: store i32 0, ptr inttoptr (i64 ptrtoint (ptr addrspace(3) @l to i64) to ptr)
 ; NOTTI-NEXT: ret void
 define void @non_noop_ptrint_pair_ce() {
-  store i32 0, i32* inttoptr (i64 ptrtoint (i32 addrspace(3)* @l to i64) to i32*)
+  store i32 0, ptr inttoptr (i64 ptrtoint (ptr addrspace(3) @l to i64) to ptr)
   ret void
 }
 
 ; COMMON-LABEL: @non_noop_ptrint_pair_ce2(
-; AMDGCN-NEXT: ret i32* inttoptr (i64 ptrtoint (i32 addrspace(3)* @l to i64) to i32*)
-; NOTTI-NEXT: ret i32* inttoptr (i64 ptrtoint (i32 addrspace(3)* @l to i64) to i32*)
-define i32* @non_noop_ptrint_pair_ce2() {
-  ret i32* inttoptr (i64 ptrtoint (i32 addrspace(3)* @l to i64) to i32*)
+; AMDGCN-NEXT: ret ptr inttoptr (i64 ptrtoint (ptr addrspace(3) @l to i64) to ptr)
+; NOTTI-NEXT: ret ptr inttoptr (i64 ptrtoint (ptr addrspace(3) @l to i64) to ptr)
+define ptr @non_noop_ptrint_pair_ce2() {
+  ret ptr inttoptr (i64 ptrtoint (ptr addrspace(3) @l to i64) to ptr)
 }
 
 ; COMMON-LABEL: @non_noop_ptrint_pair_ce3(
-; AMDGCN-NEXT: ret i32* inttoptr (i32 ptrtoint (i32 addrspace(1)* @g to i32) to i32*)
-; NOTTI-NEXT: ret i32* inttoptr (i32 ptrtoint (i32 addrspace(1)* @g to i32) to i32*)
-define i32* @non_noop_ptrint_pair_ce3() {
-  ret i32* inttoptr (i32 ptrtoint (i32 addrspace(1)* @g to i32) to i32*)
+; AMDGCN-NEXT: ret ptr inttoptr (i32 ptrtoint (ptr addrspace(1) @g to i32) to ptr)
+; NOTTI-NEXT: ret ptr inttoptr (i32 ptrtoint (ptr addrspace(1) @g to i32) to ptr)
+define ptr @non_noop_ptrint_pair_ce3() {
+  ret ptr inttoptr (i32 ptrtoint (ptr addrspace(1) @g to i32) to ptr)
 }
 
 ; COMMON-LABEL: @non_noop_ptrint_pair_ce4(
-; AMDGCN-NEXT: ret i32* inttoptr (i128 ptrtoint (i32 addrspace(3)* @l to i128) to i32*)
-; NOTTI-NEXT: ret i32* inttoptr (i128 ptrtoint (i32 addrspace(3)* @l to i128) to i32*)
-define i32* @non_noop_ptrint_pair_ce4() {
-  ret i32* inttoptr (i128 ptrtoint (i32 addrspace(3)* @l to i128) to i32*)
+; AMDGCN-NEXT: ret ptr inttoptr (i128 ptrtoint (ptr addrspace(3) @l to i128) to ptr)
+; NOTTI-NEXT: ret ptr inttoptr (i128 ptrtoint (ptr addrspace(3) @l to i128) to ptr)
+define ptr @non_noop_ptrint_pair_ce4() {
+  ret ptr inttoptr (i128 ptrtoint (ptr addrspace(3) @l to i128) to ptr)
 }

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions-inseltpoison.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions-inseltpoison.ll
index e0bf4f1058f9e..65fa11050418b 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions-inseltpoison.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions-inseltpoison.ll
@@ -8,13 +8,13 @@
 ; Should generate flat load
 
 ; CHECK-LABEL: @generic_address_bitcast_const(
-; CHECK: %vecload1 = load <2 x double>, <2 x double> addrspace(1)* bitcast (double addrspace(1)* getelementptr inbounds ([100 x double], [100 x double] addrspace(1)* @data, i64 0, i64 4) to <2 x double> addrspace(1)*), align 8
-define amdgpu_kernel void @generic_address_bitcast_const(i64 %arg0, i32 addrspace(1)* nocapture %results) #0 {
+; CHECK: %vecload1 = load <2 x double>, ptr addrspace(1) getelementptr inbounds ([100 x double], ptr addrspace(1) @data, i64 0, i64 4), align 8
+define amdgpu_kernel void @generic_address_bitcast_const(i64 %arg0, ptr addrspace(1) nocapture %results) #0 {
 entry:
   %tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp2 = zext i32 %tmp1 to i64
   %tmp3 = add i64 %tmp2, %arg0
-  %vecload1 = load <2 x double>, <2 x double>* bitcast (double* getelementptr ([100 x double], [100 x double]* addrspacecast ([100 x double] addrspace(1)* @data to [100 x double]*), i64 0, i64 4) to <2 x double>*), align 8
+  %vecload1 = load <2 x double>, ptr bitcast (ptr getelementptr ([100 x double], ptr addrspacecast (ptr addrspace(1) @data to ptr), i64 0, i64 4) to ptr), align 8
   %cmp = fcmp ord <2 x double> %vecload1, zeroinitializer
   %sext = sext <2 x i1> %cmp to <2 x i64>
   %tmp4 = extractelement <2 x i64> %sext, i64 0
@@ -23,82 +23,79 @@ entry:
   %tmp7 = lshr i64 %tmp6, 63
   %tmp8 = trunc i64 %tmp7 to i32
   %idxprom = and i64 %tmp3, 4294967295
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %results, i64 %idxprom
-  store i32 %tmp8, i32 addrspace(1)* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %results, i64 %idxprom
+  store i32 %tmp8, ptr addrspace(1) %arrayidx, align 4
   ret void
 }
 
 @generic_address_bug9749.val = internal addrspace(1) global float 0.0, align 4
 
-declare i32 @_Z9get_fencePv(i8*)
+declare i32 @_Z9get_fencePv(ptr)
 %opencl.pipe_t = type opaque
 
 ; This is a compile time assert bug, but we still want to check optimization
 ; is performed to generate ld_global.
 ; CHECK-LABEL: @generic_address_pipe_bug9673(
-; CHECK: %tmp1 = bitcast %opencl.pipe_t addrspace(3)* %in_pipe to i32 addrspace(3)*
-; CHECK: %add.ptr = getelementptr inbounds i32, i32 addrspace(3)* %tmp1, i32 2
-; CHECK: %tmp2 = load i32, i32 addrspace(3)* %add.ptr, align 4
-define amdgpu_kernel void @generic_address_pipe_bug9673(%opencl.pipe_t addrspace(3)* nocapture %in_pipe, i32 addrspace(1)* nocapture %dst) #0 {
+; CHECK: %add.ptr = getelementptr inbounds i32, ptr addrspace(3) %in_pipe, i32 2
+; CHECK: %tmp2 = load i32, ptr addrspace(3) %add.ptr, align 4
+define amdgpu_kernel void @generic_address_pipe_bug9673(ptr addrspace(3) nocapture %in_pipe, ptr addrspace(1) nocapture %dst) #0 {
 entry:
   %tmp = call i32 @llvm.amdgcn.workitem.id.x()
-  %tmp1 = bitcast %opencl.pipe_t addrspace(3)* %in_pipe to i32 addrspace(3)*
-  %add.ptr = getelementptr inbounds i32, i32 addrspace(3)* %tmp1, i32 2
-  %tmp2 = load i32, i32 addrspace(3)* %add.ptr, align 4
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %dst, i32 %tmp
-  store i32 %tmp2, i32 addrspace(1)* %arrayidx, align 4
+  %add.ptr = getelementptr inbounds i32, ptr addrspace(3) %in_pipe, i32 2
+  %tmp2 = load i32, ptr addrspace(3) %add.ptr, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %dst, i32 %tmp
+  store i32 %tmp2, ptr addrspace(1) %arrayidx, align 4
   ret void
 }
 
 ; Should generate flat load
 ; CHECK-LABEL: @generic_address_bug9749(
 ; CHECK: br i1
-; CHECK: load float, float*
+; CHECK: load float, ptr
 ; CHECK: br label
-define amdgpu_kernel void @generic_address_bug9749(i32 addrspace(1)* nocapture %results) #0 {
+define amdgpu_kernel void @generic_address_bug9749(ptr addrspace(1) nocapture %results) #0 {
 entry:
-  %ptr = alloca float*, align 8, addrspace(5)
+  %ptr = alloca ptr, align 8, addrspace(5)
   %tmp = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp1 = zext i32 %tmp to i64
-  store float 0x3FB99999A0000000, float addrspace(1)* @generic_address_bug9749.val, align 4
-  store volatile float* addrspacecast (float addrspace(1)* @generic_address_bug9749.val to float*), float* addrspace(5)* %ptr, align 8
-  %tmp2 = load volatile float*, float* addrspace(5)* %ptr, align 8
-  %tmp3 = load float, float addrspace(1)* @generic_address_bug9749.val, align 4
-  %tmp4 = bitcast float* %tmp2 to i8*
-  %call.i = call i32 @_Z9get_fencePv(i8* %tmp4) #1
+  store float 0x3FB99999A0000000, ptr addrspace(1) @generic_address_bug9749.val, align 4
+  store volatile ptr addrspacecast (ptr addrspace(1) @generic_address_bug9749.val to ptr), ptr addrspace(5) %ptr, align 8
+  %tmp2 = load volatile ptr, ptr addrspace(5) %ptr, align 8
+  %tmp3 = load float, ptr addrspace(1) @generic_address_bug9749.val, align 4
+  %call.i = call i32 @_Z9get_fencePv(ptr %tmp2) #1
   %switch.i.i = icmp ult i32 %call.i, 4
   br i1 %switch.i.i, label %if.end.i, label %helperFunction.exit
 
 if.end.i:                                         ; preds = %entry
-  %tmp5 = load float, float* %tmp2, align 4
+  %tmp5 = load float, ptr %tmp2, align 4
   %not.cmp.i = fcmp oeq float %tmp5, %tmp3
   %phitmp = zext i1 %not.cmp.i to i32
   br label %helperFunction.exit
 
 helperFunction.exit:                              ; preds = %if.end.i, %entry
   %retval.0.i = phi i32 [ 0, %entry ], [ %phitmp, %if.end.i ]
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %results, i64 %tmp1
-  store i32 %retval.0.i, i32 addrspace(1)* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %results, i64 %tmp1
+  store i32 %retval.0.i, ptr addrspace(1) %arrayidx, align 4
   ret void
 }
 
 ; CHECK-LABEL: @generic_address_opt_phi_bug9776_simple_phi_kernel(
-; CHECK: phi i32 addrspace(3)*
-; CHECK: store i32 %i.03, i32 addrspace(3)* %
-define amdgpu_kernel void @generic_address_opt_phi_bug9776_simple_phi_kernel(i32 addrspace(3)* nocapture %in, i32 %numElems) #0 {
+; CHECK: phi ptr addrspace(3)
+; CHECK: store i32 %i.03, ptr addrspace(3) %
+define amdgpu_kernel void @generic_address_opt_phi_bug9776_simple_phi_kernel(ptr addrspace(3) nocapture %in, i32 %numElems) #0 {
 entry:
   %cmp1 = icmp eq i32 %numElems, 0
   br i1 %cmp1, label %for.end, label %for.body.lr.ph
 
 for.body.lr.ph:                                   ; preds = %entry
-  %tmp = addrspacecast i32 addrspace(3)* %in to i32*
+  %tmp = addrspacecast ptr addrspace(3) %in to ptr
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %for.body.lr.ph
   %i.03 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
-  %ptr.02 = phi i32* [ %tmp, %for.body.lr.ph ], [ %add.ptr, %for.body ]
-  store i32 %i.03, i32* %ptr.02, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %ptr.02, i64 4
+  %ptr.02 = phi ptr [ %tmp, %for.body.lr.ph ], [ %add.ptr, %for.body ]
+  store i32 %i.03, ptr %ptr.02, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %ptr.02, i64 4
   %inc = add nuw i32 %i.03, 1
   %exitcond = icmp eq i32 %inc, %numElems
   br i1 %exitcond, label %for.end, label %for.body
@@ -108,31 +105,29 @@ for.end:                                          ; preds = %for.body, %entry
 }
 
 ; CHECK-LABEL: @generic_address_bug9899(
-; CHECK: %vecload = load <2 x i32>, <2 x i32> addrspace(3)*
-; CHECK: store <2 x i32> %tmp16, <2 x i32> addrspace(3)*
-define amdgpu_kernel void @generic_address_bug9899(i64 %arg0, i32 addrspace(3)* nocapture %sourceA, i32 addrspace(3)* nocapture %destValues) #0 {
+; CHECK: %vecload = load <2 x i32>, ptr addrspace(3)
+; CHECK: store <2 x i32> %tmp16, ptr addrspace(3)
+define amdgpu_kernel void @generic_address_bug9899(i64 %arg0, ptr addrspace(3) nocapture %sourceA, ptr addrspace(3) nocapture %destValues) #0 {
 entry:
   %tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp2 = zext i32 %tmp1 to i64
   %tmp3 = add i64 %tmp2, %arg0
   %sext = shl i64 %tmp3, 32
-  %tmp4 = addrspacecast i32 addrspace(3)* %destValues to i32*
-  %tmp5 = addrspacecast i32 addrspace(3)* %sourceA to i32*
+  %tmp4 = addrspacecast ptr addrspace(3) %destValues to ptr
+  %tmp5 = addrspacecast ptr addrspace(3) %sourceA to ptr
   %tmp6 = ashr exact i64 %sext, 31
-  %tmp7 = getelementptr inbounds i32, i32* %tmp5, i64 %tmp6
-  %arrayidx_v4 = bitcast i32* %tmp7 to <2 x i32>*
-  %vecload = load <2 x i32>, <2 x i32>* %arrayidx_v4, align 4
+  %tmp7 = getelementptr inbounds i32, ptr %tmp5, i64 %tmp6
+  %vecload = load <2 x i32>, ptr %tmp7, align 4
   %tmp8 = extractelement <2 x i32> %vecload, i32 0
   %tmp9 = extractelement <2 x i32> %vecload, i32 1
   %tmp10 = icmp eq i32 %tmp8, 0
   %tmp11 = select i1 %tmp10, i32 32, i32 %tmp8
   %tmp12 = icmp eq i32 %tmp9, 0
   %tmp13 = select i1 %tmp12, i32 32, i32 %tmp9
-  %tmp14 = getelementptr inbounds i32, i32* %tmp4, i64 %tmp6
+  %tmp14 = getelementptr inbounds i32, ptr %tmp4, i64 %tmp6
   %tmp15 = insertelement <2 x i32> poison, i32 %tmp11, i32 0
   %tmp16 = insertelement <2 x i32> %tmp15, i32 %tmp13, i32 1
-  %arrayidx_v41 = bitcast i32* %tmp14 to <2 x i32>*
-  store <2 x i32> %tmp16, <2 x i32>* %arrayidx_v41, align 4
+  store <2 x i32> %tmp16, ptr %tmp14, align 4
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll
index 2080c51b66fbd..300aa1b1f186b 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/old-pass-regressions.ll
@@ -8,13 +8,13 @@
 ; Should generate flat load
 
 ; CHECK-LABEL: @generic_address_bitcast_const(
-; CHECK: %vecload1 = load <2 x double>, <2 x double> addrspace(1)* bitcast (double addrspace(1)* getelementptr inbounds ([100 x double], [100 x double] addrspace(1)* @data, i64 0, i64 4) to <2 x double> addrspace(1)*), align 8
-define amdgpu_kernel void @generic_address_bitcast_const(i64 %arg0, i32 addrspace(1)* nocapture %results) #0 {
+; CHECK: %vecload1 = load <2 x double>, ptr addrspace(1) getelementptr inbounds ([100 x double], ptr addrspace(1) @data, i64 0, i64 4), align 8
+define amdgpu_kernel void @generic_address_bitcast_const(i64 %arg0, ptr addrspace(1) nocapture %results) #0 {
 entry:
   %tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp2 = zext i32 %tmp1 to i64
   %tmp3 = add i64 %tmp2, %arg0
-  %vecload1 = load <2 x double>, <2 x double>* bitcast (double* getelementptr ([100 x double], [100 x double]* addrspacecast ([100 x double] addrspace(1)* @data to [100 x double]*), i64 0, i64 4) to <2 x double>*), align 8
+  %vecload1 = load <2 x double>, ptr bitcast (ptr getelementptr ([100 x double], ptr addrspacecast (ptr addrspace(1) @data to ptr), i64 0, i64 4) to ptr), align 8
   %cmp = fcmp ord <2 x double> %vecload1, zeroinitializer
   %sext = sext <2 x i1> %cmp to <2 x i64>
   %tmp4 = extractelement <2 x i64> %sext, i64 0
@@ -23,82 +23,79 @@ entry:
   %tmp7 = lshr i64 %tmp6, 63
   %tmp8 = trunc i64 %tmp7 to i32
   %idxprom = and i64 %tmp3, 4294967295
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %results, i64 %idxprom
-  store i32 %tmp8, i32 addrspace(1)* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %results, i64 %idxprom
+  store i32 %tmp8, ptr addrspace(1) %arrayidx, align 4
   ret void
 }
 
 @generic_address_bug9749.val = internal addrspace(1) global float 0.0, align 4
 
-declare i32 @_Z9get_fencePv(i8*)
+declare i32 @_Z9get_fencePv(ptr)
 %opencl.pipe_t = type opaque
 
 ; This is a compile time assert bug, but we still want to check optimization
 ; is performed to generate ld_global.
 ; CHECK-LABEL: @generic_address_pipe_bug9673(
-; CHECK: %tmp1 = bitcast %opencl.pipe_t addrspace(3)* %in_pipe to i32 addrspace(3)*
-; CHECK: %add.ptr = getelementptr inbounds i32, i32 addrspace(3)* %tmp1, i32 2
-; CHECK: %tmp2 = load i32, i32 addrspace(3)* %add.ptr, align 4
-define amdgpu_kernel void @generic_address_pipe_bug9673(%opencl.pipe_t addrspace(3)* nocapture %in_pipe, i32 addrspace(1)* nocapture %dst) #0 {
+; CHECK: %add.ptr = getelementptr inbounds i32, ptr addrspace(3) %in_pipe, i32 2
+; CHECK: %tmp2 = load i32, ptr addrspace(3) %add.ptr, align 4
+define amdgpu_kernel void @generic_address_pipe_bug9673(ptr addrspace(3) nocapture %in_pipe, ptr addrspace(1) nocapture %dst) #0 {
 entry:
   %tmp = call i32 @llvm.amdgcn.workitem.id.x()
-  %tmp1 = bitcast %opencl.pipe_t addrspace(3)* %in_pipe to i32 addrspace(3)*
-  %add.ptr = getelementptr inbounds i32, i32 addrspace(3)* %tmp1, i32 2
-  %tmp2 = load i32, i32 addrspace(3)* %add.ptr, align 4
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %dst, i32 %tmp
-  store i32 %tmp2, i32 addrspace(1)* %arrayidx, align 4
+  %add.ptr = getelementptr inbounds i32, ptr addrspace(3) %in_pipe, i32 2
+  %tmp2 = load i32, ptr addrspace(3) %add.ptr, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %dst, i32 %tmp
+  store i32 %tmp2, ptr addrspace(1) %arrayidx, align 4
   ret void
 }
 
 ; Should generate flat load
 ; CHECK-LABEL: @generic_address_bug9749(
 ; CHECK: br i1
-; CHECK: load float, float*
+; CHECK: load float, ptr
 ; CHECK: br label
-define amdgpu_kernel void @generic_address_bug9749(i32 addrspace(1)* nocapture %results) #0 {
+define amdgpu_kernel void @generic_address_bug9749(ptr addrspace(1) nocapture %results) #0 {
 entry:
-  %ptr = alloca float*, align 8, addrspace(5)
+  %ptr = alloca ptr, align 8, addrspace(5)
   %tmp = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp1 = zext i32 %tmp to i64
-  store float 0x3FB99999A0000000, float addrspace(1)* @generic_address_bug9749.val, align 4
-  store volatile float* addrspacecast (float addrspace(1)* @generic_address_bug9749.val to float*), float* addrspace(5)* %ptr, align 8
-  %tmp2 = load volatile float*, float* addrspace(5)* %ptr, align 8
-  %tmp3 = load float, float addrspace(1)* @generic_address_bug9749.val, align 4
-  %tmp4 = bitcast float* %tmp2 to i8*
-  %call.i = call i32 @_Z9get_fencePv(i8* %tmp4) #1
+  store float 0x3FB99999A0000000, ptr addrspace(1) @generic_address_bug9749.val, align 4
+  store volatile ptr addrspacecast (ptr addrspace(1) @generic_address_bug9749.val to ptr), ptr addrspace(5) %ptr, align 8
+  %tmp2 = load volatile ptr, ptr addrspace(5) %ptr, align 8
+  %tmp3 = load float, ptr addrspace(1) @generic_address_bug9749.val, align 4
+  %call.i = call i32 @_Z9get_fencePv(ptr %tmp2) #1
   %switch.i.i = icmp ult i32 %call.i, 4
   br i1 %switch.i.i, label %if.end.i, label %helperFunction.exit
 
 if.end.i:                                         ; preds = %entry
-  %tmp5 = load float, float* %tmp2, align 4
+  %tmp5 = load float, ptr %tmp2, align 4
   %not.cmp.i = fcmp oeq float %tmp5, %tmp3
   %phitmp = zext i1 %not.cmp.i to i32
   br label %helperFunction.exit
 
 helperFunction.exit:                              ; preds = %if.end.i, %entry
   %retval.0.i = phi i32 [ 0, %entry ], [ %phitmp, %if.end.i ]
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %results, i64 %tmp1
-  store i32 %retval.0.i, i32 addrspace(1)* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %results, i64 %tmp1
+  store i32 %retval.0.i, ptr addrspace(1) %arrayidx, align 4
   ret void
 }
 
 ; CHECK-LABEL: @generic_address_opt_phi_bug9776_simple_phi_kernel(
-; CHECK: phi i32 addrspace(3)*
-; CHECK: store i32 %i.03, i32 addrspace(3)* %
-define amdgpu_kernel void @generic_address_opt_phi_bug9776_simple_phi_kernel(i32 addrspace(3)* nocapture %in, i32 %numElems) #0 {
+; CHECK: phi ptr addrspace(3)
+; CHECK: store i32 %i.03, ptr addrspace(3) %
+define amdgpu_kernel void @generic_address_opt_phi_bug9776_simple_phi_kernel(ptr addrspace(3) nocapture %in, i32 %numElems) #0 {
 entry:
   %cmp1 = icmp eq i32 %numElems, 0
   br i1 %cmp1, label %for.end, label %for.body.lr.ph
 
 for.body.lr.ph:                                   ; preds = %entry
-  %tmp = addrspacecast i32 addrspace(3)* %in to i32*
+  %tmp = addrspacecast ptr addrspace(3) %in to ptr
   br label %for.body
 
 for.body:                                         ; preds = %for.body, %for.body.lr.ph
   %i.03 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
-  %ptr.02 = phi i32* [ %tmp, %for.body.lr.ph ], [ %add.ptr, %for.body ]
-  store i32 %i.03, i32* %ptr.02, align 4
-  %add.ptr = getelementptr inbounds i32, i32* %ptr.02, i64 4
+  %ptr.02 = phi ptr [ %tmp, %for.body.lr.ph ], [ %add.ptr, %for.body ]
+  store i32 %i.03, ptr %ptr.02, align 4
+  %add.ptr = getelementptr inbounds i32, ptr %ptr.02, i64 4
   %inc = add nuw i32 %i.03, 1
   %exitcond = icmp eq i32 %inc, %numElems
   br i1 %exitcond, label %for.end, label %for.body
@@ -108,31 +105,29 @@ for.end:                                          ; preds = %for.body, %entry
 }
 
 ; CHECK-LABEL: @generic_address_bug9899(
-; CHECK: %vecload = load <2 x i32>, <2 x i32> addrspace(3)*
-; CHECK: store <2 x i32> %tmp16, <2 x i32> addrspace(3)*
-define amdgpu_kernel void @generic_address_bug9899(i64 %arg0, i32 addrspace(3)* nocapture %sourceA, i32 addrspace(3)* nocapture %destValues) #0 {
+; CHECK: %vecload = load <2 x i32>, ptr addrspace(3)
+; CHECK: store <2 x i32> %tmp16, ptr addrspace(3)
+define amdgpu_kernel void @generic_address_bug9899(i64 %arg0, ptr addrspace(3) nocapture %sourceA, ptr addrspace(3) nocapture %destValues) #0 {
 entry:
   %tmp1 = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp2 = zext i32 %tmp1 to i64
   %tmp3 = add i64 %tmp2, %arg0
   %sext = shl i64 %tmp3, 32
-  %tmp4 = addrspacecast i32 addrspace(3)* %destValues to i32*
-  %tmp5 = addrspacecast i32 addrspace(3)* %sourceA to i32*
+  %tmp4 = addrspacecast ptr addrspace(3) %destValues to ptr
+  %tmp5 = addrspacecast ptr addrspace(3) %sourceA to ptr
   %tmp6 = ashr exact i64 %sext, 31
-  %tmp7 = getelementptr inbounds i32, i32* %tmp5, i64 %tmp6
-  %arrayidx_v4 = bitcast i32* %tmp7 to <2 x i32>*
-  %vecload = load <2 x i32>, <2 x i32>* %arrayidx_v4, align 4
+  %tmp7 = getelementptr inbounds i32, ptr %tmp5, i64 %tmp6
+  %vecload = load <2 x i32>, ptr %tmp7, align 4
   %tmp8 = extractelement <2 x i32> %vecload, i32 0
   %tmp9 = extractelement <2 x i32> %vecload, i32 1
   %tmp10 = icmp eq i32 %tmp8, 0
   %tmp11 = select i1 %tmp10, i32 32, i32 %tmp8
   %tmp12 = icmp eq i32 %tmp9, 0
   %tmp13 = select i1 %tmp12, i32 32, i32 %tmp9
-  %tmp14 = getelementptr inbounds i32, i32* %tmp4, i64 %tmp6
+  %tmp14 = getelementptr inbounds i32, ptr %tmp4, i64 %tmp6
   %tmp15 = insertelement <2 x i32> undef, i32 %tmp11, i32 0
   %tmp16 = insertelement <2 x i32> %tmp15, i32 %tmp13, i32 1
-  %arrayidx_v41 = bitcast i32* %tmp14 to <2 x i32>*
-  store <2 x i32> %tmp16, <2 x i32>* %arrayidx_v41, align 4
+  store <2 x i32> %tmp16, ptr %tmp14, align 4
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
index ee0bb6319fdc0..bf4891355181b 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/ptrmask.ll
@@ -3,105 +3,105 @@
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
-define i8 @ptrmask_cast_local_to_flat(i8 addrspace(3)* %src.ptr, i64 %mask) {
+define i8 @ptrmask_cast_local_to_flat(ptr addrspace(3) %src.ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat(
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8 addrspace(3)* [[SRC_PTR:%.*]] to i8*
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[CAST]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8* [[MASKED]], align 1
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr addrspace(3) [[SRC_PTR:%.*]] to ptr
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[CAST]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr [[MASKED]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %mask)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %mask)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_private_to_flat(i8 addrspace(5)* %src.ptr, i64 %mask) {
+define i8 @ptrmask_cast_private_to_flat(ptr addrspace(5) %src.ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_private_to_flat(
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8 addrspace(5)* [[SRC_PTR:%.*]] to i8*
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[CAST]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8* [[MASKED]], align 1
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC_PTR:%.*]] to ptr
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[CAST]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr [[MASKED]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(5)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %mask)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(5) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %mask)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_region_to_flat(i8 addrspace(2)* %src.ptr, i64 %mask) {
+define i8 @ptrmask_cast_region_to_flat(ptr addrspace(2) %src.ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_region_to_flat(
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8 addrspace(2)* [[SRC_PTR:%.*]] to i8*
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[CAST]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8* [[MASKED]], align 1
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr addrspace(2) [[SRC_PTR:%.*]] to ptr
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[CAST]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr [[MASKED]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(2)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %mask)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(2) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %mask)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_global_to_flat(i8 addrspace(1)* %src.ptr, i64 %mask) {
+define i8 @ptrmask_cast_global_to_flat(ptr addrspace(1) %src.ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_global_to_flat(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(1)* @llvm.ptrmask.p1i8.i64(i8 addrspace(1)* [[SRC_PTR:%.*]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(1)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[SRC_PTR:%.*]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(1) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(1)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %mask)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(1) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %mask)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_999_to_flat(i8 addrspace(999)* %src.ptr, i64 %mask) {
+define i8 @ptrmask_cast_999_to_flat(ptr addrspace(999) %src.ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_999_to_flat(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(999)* @llvm.ptrmask.p999i8.i64(i8 addrspace(999)* [[SRC_PTR:%.*]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(999)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(999) @llvm.ptrmask.p999.i64(ptr addrspace(999) [[SRC_PTR:%.*]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(999) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(999)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %mask)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(999) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %mask)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_flat_to_local(i8* %ptr, i64 %mask) {
+define i8 @ptrmask_cast_flat_to_local(ptr %ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_flat_to_local(
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[PTR:%.*]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8* [[MASKED]] to i8 addrspace(3)*
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[CAST]], align 1
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr [[MASKED]] to ptr addrspace(3)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[CAST]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %ptr, i64 %mask)
-  %cast = addrspacecast i8* %masked to i8 addrspace(3)*
-  %load = load i8, i8 addrspace(3)* %cast
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 %mask)
+  %cast = addrspacecast ptr %masked to ptr addrspace(3)
+  %load = load i8, ptr addrspace(3) %cast
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_flat_to_private(i8* %ptr, i64 %mask) {
+define i8 @ptrmask_cast_flat_to_private(ptr %ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_flat_to_private(
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[PTR:%.*]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8* [[MASKED]] to i8 addrspace(5)*
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(5)* [[CAST]], align 1
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr [[MASKED]] to ptr addrspace(5)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(5) [[CAST]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %ptr, i64 %mask)
-  %cast = addrspacecast i8* %masked to i8 addrspace(5)*
-  %load = load i8, i8 addrspace(5)* %cast
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 %mask)
+  %cast = addrspacecast ptr %masked to ptr addrspace(5)
+  %load = load i8, ptr addrspace(5) %cast
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_flat_to_global(i8* %ptr, i64 %mask) {
+define i8 @ptrmask_cast_flat_to_global(ptr %ptr, i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_flat_to_global(
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[PTR:%.*]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8* [[MASKED]] to i8 addrspace(1)*
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(1)* [[CAST]], align 1
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[PTR:%.*]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr [[MASKED]] to ptr addrspace(1)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(1) [[CAST]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %ptr, i64 %mask)
-  %cast = addrspacecast i8* %masked to i8 addrspace(1)*
-  %load = load i8, i8 addrspace(1)* %cast
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %ptr, i64 %mask)
+  %cast = addrspacecast ptr %masked to ptr addrspace(1)
+  %load = load i8, ptr addrspace(1) %cast
   ret i8 %load
 }
 
@@ -110,262 +110,262 @@ define i8 @ptrmask_cast_flat_to_global(i8* %ptr, i64 %mask) {
 
 define i8 @ptrmask_cast_local_to_flat_global(i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_global(
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* addrspacecast (i8 addrspace(3)* @lds0 to i8*), i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8* [[MASKED]], align 1
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr addrspacecast (ptr addrspace(3) @lds0 to ptr), i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr [[MASKED]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* addrspacecast (i8 addrspace(3)* @lds0 to i8*), i64 %mask)
-  %load = load i8, i8* %masked, align 1
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr addrspacecast (ptr addrspace(3) @lds0 to ptr), i64 %mask)
+  %load = load i8, ptr %masked, align 1
   ret i8 %load
 }
 
 define i8 @ptrmask_cast_global_to_flat_global(i64 %mask) {
 ; CHECK-LABEL: @ptrmask_cast_global_to_flat_global(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(1)* @llvm.ptrmask.p1i8.i64(i8 addrspace(1)* @gv, i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(1)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) @gv, i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(1) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* addrspacecast (i8 addrspace(1)* @gv to i8*), i64 %mask)
-  %load = load i8, i8* %masked, align 1
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr addrspacecast (ptr addrspace(1) @gv to ptr), i64 %mask)
+  %load = load i8, ptr %masked, align 1
   ret i8 %load
 }
 
-define i8 @multi_ptrmask_cast_global_to_flat(i8 addrspace(1)* %src.ptr, i64 %mask) {
+define i8 @multi_ptrmask_cast_global_to_flat(ptr addrspace(1) %src.ptr, i64 %mask) {
 ; CHECK-LABEL: @multi_ptrmask_cast_global_to_flat(
-; CHECK-NEXT:    [[LOAD0:%.*]] = load i8, i8 addrspace(1)* [[SRC_PTR:%.*]], align 1
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(1)* @llvm.ptrmask.p1i8.i64(i8 addrspace(1)* [[SRC_PTR]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i8, i8 addrspace(1)* [[TMP1]], align 1
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i8, ptr addrspace(1) [[SRC_PTR:%.*]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1) [[SRC_PTR]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i8, ptr addrspace(1) [[TMP1]], align 1
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[LOAD0]], [[LOAD1]]
 ; CHECK-NEXT:    ret i8 [[ADD]]
 ;
-  %cast = addrspacecast i8 addrspace(1)* %src.ptr to i8*
-  %load0 = load i8, i8* %cast
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %mask)
-  %load1 = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(1) %src.ptr to ptr
+  %load0 = load i8, ptr %cast
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %mask)
+  %load1 = load i8, ptr %masked
   %add = add i8 %load0, %load1
   ret i8 %add
 }
 
 ; Can't rewrite the ptrmask, but can rewrite other use instructions
-define i8 @multi_ptrmask_cast_local_to_flat(i8 addrspace(3)* %src.ptr, i64 %mask) {
+define i8 @multi_ptrmask_cast_local_to_flat(ptr addrspace(3) %src.ptr, i64 %mask) {
 ; CHECK-LABEL: @multi_ptrmask_cast_local_to_flat(
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8 addrspace(3)* [[SRC_PTR:%.*]] to i8*
-; CHECK-NEXT:    [[LOAD0:%.*]] = load i8, i8 addrspace(3)* [[SRC_PTR]], align 1
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[CAST]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i8, i8* [[MASKED]], align 1
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr addrspace(3) [[SRC_PTR:%.*]] to ptr
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i8, ptr addrspace(3) [[SRC_PTR]], align 1
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[CAST]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i8, ptr [[MASKED]], align 1
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[LOAD0]], [[LOAD1]]
 ; CHECK-NEXT:    ret i8 [[ADD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %load0 = load i8, i8* %cast
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %mask)
-  %load1 = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %load0 = load i8, ptr %cast
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %mask)
+  %load1 = load i8, ptr %masked
   %add = add i8 %load0, %load1
   ret i8 %add
 }
 
-define i8 @multi_ptrmask_cast_region_to_flat(i8 addrspace(2)* %src.ptr, i64 %mask) {
+define i8 @multi_ptrmask_cast_region_to_flat(ptr addrspace(2) %src.ptr, i64 %mask) {
 ; CHECK-LABEL: @multi_ptrmask_cast_region_to_flat(
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8 addrspace(2)* [[SRC_PTR:%.*]] to i8*
-; CHECK-NEXT:    [[LOAD0:%.*]] = load i8, i8 addrspace(2)* [[SRC_PTR]], align 1
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[CAST]], i64 [[MASK:%.*]])
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i8, i8* [[MASKED]], align 1
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr addrspace(2) [[SRC_PTR:%.*]] to ptr
+; CHECK-NEXT:    [[LOAD0:%.*]] = load i8, ptr addrspace(2) [[SRC_PTR]], align 1
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[CAST]], i64 [[MASK:%.*]])
+; CHECK-NEXT:    [[LOAD1:%.*]] = load i8, ptr [[MASKED]], align 1
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[LOAD0]], [[LOAD1]]
 ; CHECK-NEXT:    ret i8 [[ADD]]
 ;
-  %cast = addrspacecast i8 addrspace(2)* %src.ptr to i8*
-  %load0 = load i8, i8* %cast
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %mask)
-  %load1 = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(2) %src.ptr to ptr
+  %load0 = load i8, ptr %cast
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %mask)
+  %load1 = load i8, ptr %masked
   %add = add i8 %load0, %load1
   ret i8 %add
 }
 
 ; Do not fold this since it clears a single high bit.
-define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffeffffffff(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffeffffffff(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_fffffffeffffffff(
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8 addrspace(3)* [[SRC_PTR:%.*]] to i8*
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[CAST]], i64 -4294967297)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8* [[MASKED]], align 1
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr addrspace(3) [[SRC_PTR:%.*]] to ptr
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[CAST]], i64 -4294967297)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr [[MASKED]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -4294967297)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -4294967297)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
 ; Do not fold this since it clears a single high bit.
-define i8 @ptrmask_cast_local_to_flat_const_mask_7fffffffffffffff(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_7fffffffffffffff(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_7fffffffffffffff(
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8 addrspace(3)* [[SRC_PTR:%.*]] to i8*
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i64(i8* [[CAST]], i64 9223372036854775807)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8* [[MASKED]], align 1
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr addrspace(3) [[SRC_PTR:%.*]] to ptr
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[CAST]], i64 9223372036854775807)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr [[MASKED]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 9223372036854775807)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 9223372036854775807)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffff00000000(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffff00000000(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_ffffffff00000000(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 0)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 0)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -4294967296)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -4294967296)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffff80000000(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffff80000000(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_ffffffff80000000(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 -2147483648)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 -2147483648)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -2147483648)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -2147483648)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
 ; Test some align-down patterns. These only touch the low bits, which are preserved through the cast.
-define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffffffff0000(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffffffff0000(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_ffffffffffff0000(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 -65536)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 -65536)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -65536)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -65536)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffffffffff00(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffffffffff00(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_ffffffffffffff00(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 -256)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 -256)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -256)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -256)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffffffffffe0(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffffffffffe0(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_ffffffffffffffe0(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 -32)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 -32)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -32)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -32)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffffffffff0(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffffffffff0(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_fffffffffffffff0(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 -16)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 -16)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -16)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -16)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffffffffff8(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffffffffff8(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_fffffffffffffff8(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 -8)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 -8)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -8)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -8)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffffffffffc(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffffffffffc(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_fffffffffffffffc(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 -4)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 -4)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -4)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -4)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffffffffffe(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_fffffffffffffffe(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_fffffffffffffffe(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 -2)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 -2)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -2)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -2)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffffffffffff(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_ffffffffffffffff(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_ffffffffffffffff(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 -1)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP1]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 -1)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP1]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 -1)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 -1)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
 ; Make sure non-constant masks can also be handled.
-define i8 @ptrmask_cast_local_to_flat_load_range_mask(i8 addrspace(3)* %src.ptr, i64 addrspace(1)* %mask.ptr) {
+define i8 @ptrmask_cast_local_to_flat_load_range_mask(ptr addrspace(3) %src.ptr, ptr addrspace(1) %mask.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_load_range_mask(
-; CHECK-NEXT:    [[LOAD_MASK:%.*]] = load i64, i64 addrspace(1)* [[MASK_PTR:%.*]], align 8, !range !0
+; CHECK-NEXT:    [[LOAD_MASK:%.*]] = load i64, ptr addrspace(1) [[MASK_PTR:%.*]], align 8, !range !0
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[LOAD_MASK]] to i32
-; CHECK-NEXT:    [[TMP2:%.*]] = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* [[SRC_PTR:%.*]], i32 [[TMP1]])
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(3)* [[TMP2]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[SRC_PTR:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(3) [[TMP2]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %load.mask = load i64, i64 addrspace(1)* %mask.ptr, align 8, !range !0
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %cast, i64 %load.mask)
-  %load = load i8, i8* %masked
+  %load.mask = load i64, ptr addrspace(1) %mask.ptr, align 8, !range !0
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i64(ptr %cast, i64 %load.mask)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
 ; This should not be folded, as the mask is implicitly zero extended,
 ; so it would clear the high bits.
-define i8 @ptrmask_cast_local_to_flat_const_mask_32bit_neg4(i8 addrspace(3)* %src.ptr) {
+define i8 @ptrmask_cast_local_to_flat_const_mask_32bit_neg4(ptr addrspace(3) %src.ptr) {
 ; CHECK-LABEL: @ptrmask_cast_local_to_flat_const_mask_32bit_neg4(
-; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast i8 addrspace(3)* [[SRC_PTR:%.*]] to i8*
-; CHECK-NEXT:    [[MASKED:%.*]] = call i8* @llvm.ptrmask.p0i8.i32(i8* [[CAST]], i32 -4)
-; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8* [[MASKED]], align 1
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr addrspace(3) [[SRC_PTR:%.*]] to ptr
+; CHECK-NEXT:    [[MASKED:%.*]] = call ptr @llvm.ptrmask.p0.i32(ptr [[CAST]], i32 -4)
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, ptr [[MASKED]], align 1
 ; CHECK-NEXT:    ret i8 [[LOAD]]
 ;
-  %cast = addrspacecast i8 addrspace(3)* %src.ptr to i8*
-  %masked = call i8* @llvm.ptrmask.p0i8.i32(i8* %cast, i32 -4)
-  %load = load i8, i8* %masked
+  %cast = addrspacecast ptr addrspace(3) %src.ptr to ptr
+  %masked = call ptr @llvm.ptrmask.p0.i32(ptr %cast, i32 -4)
+  %load = load i8, ptr %masked
   ret i8 %load
 }
 
-declare i8* @llvm.ptrmask.p0i8.i64(i8*, i64) #0
-declare i8* @llvm.ptrmask.p0i8.i32(i8*, i32) #0
-declare i8 addrspace(5)* @llvm.ptrmask.p5i8.i32(i8 addrspace(5)*, i32) #0
-declare i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)*, i32) #0
-declare i8 addrspace(1)* @llvm.ptrmask.p1i8.i64(i8 addrspace(1)*, i64) #0
+declare ptr @llvm.ptrmask.p0.i64(ptr, i64) #0
+declare ptr @llvm.ptrmask.p0.i32(ptr, i32) #0
+declare ptr addrspace(5) @llvm.ptrmask.p5.i32(ptr addrspace(5), i32) #0
+declare ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3), i32) #0
+declare ptr addrspace(1) @llvm.ptrmask.p1.i64(ptr addrspace(1), i64) #0
 
 attributes #0 = { nounwind readnone speculatable willreturn }
 

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/redundant-addrspacecast.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/redundant-addrspacecast.ll
index 73d8039457686..145c94d551b7f 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/redundant-addrspacecast.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/redundant-addrspacecast.ll
@@ -5,23 +5,23 @@
 
 ; Make sure there is only one addrspacecast. The original cast should
 ; not be cloned to satisfy the second user.
-define void @bar(%0 addrspace(1)* %orig.ptr) {
+define void @bar(ptr addrspace(1) %orig.ptr) {
 ; CHECK-LABEL: @bar(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[ORIG_CAST:%.*]] = addrspacecast [[TMP0:%.*]] addrspace(1)* [[ORIG_PTR:%.*]] to %0*
-; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr inbounds [[TMP0]], %0* [[ORIG_CAST]], i64 0, i32 1
-; CHECK-NEXT:    call void @foo(i8* [[GEP0]])
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds [[TMP0]], %0* [[ORIG_CAST]], i64 0, i32 2
-; CHECK-NEXT:    call void @foo(i8* [[GEP1]])
+; CHECK-NEXT:    [[ORIG_CAST:%.*]] = addrspacecast ptr addrspace(1) [[ORIG_PTR:%.*]] to ptr
+; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr inbounds [[TMP0:%.*]], ptr [[ORIG_CAST]], i64 0, i32 1
+; CHECK-NEXT:    call void @foo(ptr [[GEP0]])
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds [[TMP0]], ptr [[ORIG_CAST]], i64 0, i32 2
+; CHECK-NEXT:    call void @foo(ptr [[GEP1]])
 ; CHECK-NEXT:    ret void
 ;
 bb:
-  %orig.cast = addrspacecast %0 addrspace(1)* %orig.ptr to %0*
-  %gep0 = getelementptr inbounds %0, %0* %orig.cast, i64 0, i32 1
-  call void @foo(i8* %gep0)
-  %gep1 = getelementptr inbounds %0, %0* %orig.cast, i64 0, i32 2
-  call void @foo(i8* %gep1)
+  %orig.cast = addrspacecast ptr addrspace(1) %orig.ptr to ptr
+  %gep0 = getelementptr inbounds %0, ptr %orig.cast, i64 0, i32 1
+  call void @foo(ptr %gep0)
+  %gep1 = getelementptr inbounds %0, ptr %orig.cast, i64 0, i32 2
+  call void @foo(ptr %gep1)
   ret void
 }
 
-declare void @foo(i8*)
+declare void @foo(ptr)

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/select.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/select.ll
index 3acd21c739585..cd2f574c7a708 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/select.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/select.ll
@@ -4,50 +4,50 @@
 ;  this doesn't do something insane on non-canonical IR.
 
 ; CHECK-LABEL: @return_select_group_flat(
-; CHECK-NEXT: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-; CHECK-NEXT: %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
-; CHECK-NEXT: %select = select i1 %c, i32* %cast0, i32* %cast1
-; CHECK-NEXT: ret i32* %select
-define i32* @return_select_group_flat(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
-  %select = select i1 %c, i32* %cast0, i32* %cast1
-  ret i32* %select
+; CHECK-NEXT: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+; CHECK-NEXT: %cast1 = addrspacecast ptr addrspace(3) %group.ptr.1 to ptr
+; CHECK-NEXT: %select = select i1 %c, ptr %cast0, ptr %cast1
+; CHECK-NEXT: ret ptr %select
+define ptr @return_select_group_flat(i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) %group.ptr.1) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cast1 = addrspacecast ptr addrspace(3) %group.ptr.1 to ptr
+  %select = select i1 %c, ptr %cast0, ptr %cast1
+  ret ptr %select
 }
 
 ; CHECK-LABEL: @store_select_group_flat(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1
-; CHECK: store i32 -1, i32 addrspace(3)* %select
-define amdgpu_kernel void @store_select_group_flat(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
-  %select = select i1 %c, i32* %cast0, i32* %cast1
-  store i32 -1, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) %group.ptr.1
+; CHECK: store i32 -1, ptr addrspace(3) %select
+define amdgpu_kernel void @store_select_group_flat(i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) %group.ptr.1) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cast1 = addrspacecast ptr addrspace(3) %group.ptr.1 to ptr
+  %select = select i1 %c, ptr %cast0, ptr %cast1
+  store i32 -1, ptr %select
   ret void
 }
 
 ; Make sure metadata is preserved
 ; CHECK-LABEL: @load_select_group_flat_md(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1, !prof !0
-; CHECK: %load = load i32, i32 addrspace(3)* %select
-define i32 @load_select_group_flat_md(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* %group.ptr.1) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cast1 = addrspacecast i32 addrspace(3)* %group.ptr.1 to i32*
-  %select = select i1 %c, i32* %cast0, i32* %cast1, !prof !0
-  %load = load i32, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) %group.ptr.1, !prof !0
+; CHECK: %load = load i32, ptr addrspace(3) %select
+define i32 @load_select_group_flat_md(i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) %group.ptr.1) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cast1 = addrspacecast ptr addrspace(3) %group.ptr.1 to ptr
+  %select = select i1 %c, ptr %cast0, ptr %cast1, !prof !0
+  %load = load i32, ptr %select
   ret i32 %load
 }
 
 ; CHECK-LABEL: @store_select_mismatch_group_private_flat(
-; CHECK: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-; CHECK: %cast1 = addrspacecast i32 addrspace(5)* %private.ptr.1 to i32*
-; CHECK: %select = select i1 %c, i32* %cast0, i32* %cast1
-; CHECK: store i32 -1, i32* %select
-define amdgpu_kernel void @store_select_mismatch_group_private_flat(i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(5)* %private.ptr.1) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %cast1 = addrspacecast i32 addrspace(5)* %private.ptr.1 to i32*
-  %select = select i1 %c, i32* %cast0, i32* %cast1
-  store i32 -1, i32* %select
+; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+; CHECK: %cast1 = addrspacecast ptr addrspace(5) %private.ptr.1 to ptr
+; CHECK: %select = select i1 %c, ptr %cast0, ptr %cast1
+; CHECK: store i32 -1, ptr %select
+define amdgpu_kernel void @store_select_mismatch_group_private_flat(i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(5) %private.ptr.1) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %cast1 = addrspacecast ptr addrspace(5) %private.ptr.1 to ptr
+  %select = select i1 %c, ptr %cast0, ptr %cast1
+  store i32 -1, ptr %select
   ret void
 }
 
@@ -55,207 +55,207 @@ define amdgpu_kernel void @store_select_mismatch_group_private_flat(i1 %c, i32 a
 @lds1 = internal addrspace(3) global i32 456, align 4
 
 ; CHECK-LABEL: @constexpr_select_group_flat(
-; CHECK: %tmp = load i32, i32 addrspace(3)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(3)* @lds0, i32 addrspace(3)* @lds1)
+; CHECK: %tmp = load i32, ptr addrspace(3) select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspace(3) @lds0, ptr addrspace(3) @lds1)
 define i32 @constexpr_select_group_flat() #0 {
 bb:
-  %tmp = load i32, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* @lds0 to i32*), i32* addrspacecast (i32 addrspace(3)* @lds1 to i32*))
+  %tmp = load i32, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) @lds0 to ptr), ptr addrspacecast (ptr addrspace(3) @lds1 to ptr))
   ret i32 %tmp
 }
 
 ; CHECK-LABEL: @constexpr_select_group_global_flat_mismatch(
-; CHECK: %tmp = load i32, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* @lds0 to i32*), i32* addrspacecast (i32 addrspace(1)* @global0 to i32*))
+; CHECK: %tmp = load i32, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) @lds0 to ptr), ptr addrspacecast (ptr addrspace(1) @global0 to ptr))
 define i32 @constexpr_select_group_global_flat_mismatch() #0 {
 bb:
-  %tmp = load i32, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* @lds0 to i32*), i32* addrspacecast (i32 addrspace(1)* @global0 to i32*))
+  %tmp = load i32, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) @lds0 to ptr), ptr addrspacecast (ptr addrspace(1) @global0 to ptr))
   ret i32 %tmp
 }
 
 ; CHECK-LABEL: @store_select_group_flat_null(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* addrspacecast (i32* null to i32 addrspace(3)*)
-; CHECK: store i32 -1, i32 addrspace(3)* %select
-define amdgpu_kernel void @store_select_group_flat_null(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* %cast0, i32* null
-  store i32 -1, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3))
+; CHECK: store i32 -1, ptr addrspace(3) %select
+define amdgpu_kernel void @store_select_group_flat_null(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr %cast0, ptr null
+  store i32 -1, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_flat_null_swap(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* addrspacecast (i32* null to i32 addrspace(3)*), i32 addrspace(3)* %group.ptr.0
-; CHECK: store i32 -1, i32 addrspace(3)* %select
-define amdgpu_kernel void @store_select_group_flat_null_swap(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* null, i32* %cast0
-  store i32 -1, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3)), ptr addrspace(3) %group.ptr.0
+; CHECK: store i32 -1, ptr addrspace(3) %select
+define amdgpu_kernel void @store_select_group_flat_null_swap(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr null, ptr %cast0
+  store i32 -1, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_flat_undef(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* undef
-; CHECK: store i32 -1, i32 addrspace(3)* %select
-define amdgpu_kernel void @store_select_group_flat_undef(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* %cast0, i32* undef
-  store i32 -1, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) undef
+; CHECK: store i32 -1, ptr addrspace(3) %select
+define amdgpu_kernel void @store_select_group_flat_undef(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr %cast0, ptr undef
+  store i32 -1, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_flat_undef_swap(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* undef, i32 addrspace(3)* %group.ptr.0
-; CHECK: store i32 -1, i32 addrspace(3)* %select
-define amdgpu_kernel void @store_select_group_flat_undef_swap(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* undef, i32* %cast0
-  store i32 -1, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspace(3) undef, ptr addrspace(3) %group.ptr.0
+; CHECK: store i32 -1, ptr addrspace(3) %select
+define amdgpu_kernel void @store_select_group_flat_undef_swap(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr undef, ptr %cast0
+  store i32 -1, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_gep_group_flat_null(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* addrspacecast (i32* null to i32 addrspace(3)*)
-; CHECK: %gep = getelementptr i32, i32 addrspace(3)* %select, i64 16
-; CHECK: store i32 -1, i32 addrspace(3)* %gep
-define amdgpu_kernel void @store_select_gep_group_flat_null(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* %cast0, i32* null
-  %gep = getelementptr i32, i32* %select, i64 16
-  store i32 -1, i32* %gep
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) addrspacecast (ptr null to ptr addrspace(3))
+; CHECK: %gep = getelementptr i32, ptr addrspace(3) %select, i64 16
+; CHECK: store i32 -1, ptr addrspace(3) %gep
+define amdgpu_kernel void @store_select_gep_group_flat_null(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr %cast0, ptr null
+  %gep = getelementptr i32, ptr %select, i64 16
+  store i32 -1, ptr %gep
   ret void
 }
 
 @global0 = internal addrspace(1) global i32 123, align 4
 
 ; CHECK-LABEL: @store_select_group_flat_constexpr(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* @lds1
-; CHECK: store i32 7, i32 addrspace(3)* %select
-define amdgpu_kernel void @store_select_group_flat_constexpr(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* %cast0, i32* addrspacecast (i32 addrspace(3)* @lds1 to i32*)
-  store i32 7, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) @lds1
+; CHECK: store i32 7, ptr addrspace(3) %select
+define amdgpu_kernel void @store_select_group_flat_constexpr(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr %cast0, ptr addrspacecast (ptr addrspace(3) @lds1 to ptr)
+  store i32 7, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_flat_inttoptr_flat(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* addrspacecast (i32* inttoptr (i64 12345 to i32*) to i32 addrspace(3)*)
-; CHECK: store i32 7, i32 addrspace(3)* %select
-define amdgpu_kernel void @store_select_group_flat_inttoptr_flat(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* %cast0, i32* inttoptr (i64 12345 to i32*)
-  store i32 7, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) addrspacecast (ptr inttoptr (i64 12345 to ptr) to ptr addrspace(3))
+; CHECK: store i32 7, ptr addrspace(3) %select
+define amdgpu_kernel void @store_select_group_flat_inttoptr_flat(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr %cast0, ptr inttoptr (i64 12345 to ptr)
+  store i32 7, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_flat_inttoptr_group(
-; CHECK: %select = select i1 %c, i32 addrspace(3)* %group.ptr.0, i32 addrspace(3)* inttoptr (i32 400 to i32 addrspace(3)*)
-; CHECK-NEXT: store i32 7, i32 addrspace(3)* %select
-define amdgpu_kernel void @store_select_group_flat_inttoptr_group(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* %cast0, i32* addrspacecast (i32 addrspace(3)* inttoptr (i32 400 to i32 addrspace(3)*) to i32*)
-  store i32 7, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspace(3) %group.ptr.0, ptr addrspace(3) inttoptr (i32 400 to ptr addrspace(3))
+; CHECK-NEXT: store i32 7, ptr addrspace(3) %select
+define amdgpu_kernel void @store_select_group_flat_inttoptr_group(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr %cast0, ptr addrspacecast (ptr addrspace(3) inttoptr (i32 400 to ptr addrspace(3)) to ptr)
+  store i32 7, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_global_mismatch_flat_constexpr(
-; CHECK: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-; CHECK: %select = select i1 %c, i32* %cast0, i32* addrspacecast (i32 addrspace(1)* @global0 to i32*)
-; CHECK: store i32 7, i32* %select
-define amdgpu_kernel void @store_select_group_global_mismatch_flat_constexpr(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* %cast0, i32* addrspacecast (i32 addrspace(1)* @global0 to i32*)
-  store i32 7, i32* %select
+; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+; CHECK: %select = select i1 %c, ptr %cast0, ptr addrspacecast (ptr addrspace(1) @global0 to ptr)
+; CHECK: store i32 7, ptr %select
+define amdgpu_kernel void @store_select_group_global_mismatch_flat_constexpr(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr %cast0, ptr addrspacecast (ptr addrspace(1) @global0 to ptr)
+  store i32 7, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_global_mismatch_flat_constexpr_swap(
-; CHECK: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-; CHECK: %select = select i1 %c, i32* addrspacecast (i32 addrspace(1)* @global0 to i32*), i32* %cast0
-; CHECK: store i32 7, i32* %select
-define amdgpu_kernel void @store_select_group_global_mismatch_flat_constexpr_swap(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* addrspacecast (i32 addrspace(1)* @global0 to i32*), i32* %cast0
-  store i32 7, i32* %select
+; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+; CHECK: %select = select i1 %c, ptr addrspacecast (ptr addrspace(1) @global0 to ptr), ptr %cast0
+; CHECK: store i32 7, ptr %select
+define amdgpu_kernel void @store_select_group_global_mismatch_flat_constexpr_swap(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr addrspacecast (ptr addrspace(1) @global0 to ptr), ptr %cast0
+  store i32 7, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_global_mismatch_null_null(
-; CHECK: %select = select i1 %c, i32* addrspacecast (i32 addrspace(3)* null to i32*), i32* addrspacecast (i32 addrspace(1)* null to i32*)
-; CHECK: store i32 7, i32* %select
+; CHECK: %select = select i1 %c, ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)
+; CHECK: store i32 7, ptr %select
 define amdgpu_kernel void @store_select_group_global_mismatch_null_null(i1 %c) #0 {
-  %select = select i1 %c, i32* addrspacecast (i32 addrspace(3)* null to i32*), i32* addrspacecast (i32 addrspace(1)* null to i32*)
-  store i32 7, i32* %select
+  %select = select i1 %c, ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)
+  store i32 7, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_global_mismatch_null_null_constexpr(
-; CHECK: store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* null to i32*), i32* addrspacecast (i32 addrspace(1)* null to i32*)), align 4
+; CHECK: store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)), align 4
 define amdgpu_kernel void @store_select_group_global_mismatch_null_null_constexpr() #0 {
-  store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* null to i32*), i32* addrspacecast (i32 addrspace(1)* null to i32*)), align 4
+  store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)), align 4
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_global_mismatch_gv_null_constexpr(
-; CHECK: store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* @lds0 to i32*), i32* addrspacecast (i32 addrspace(1)* null to i32*)), align 4
+; CHECK: store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) @lds0 to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)), align 4
 define amdgpu_kernel void @store_select_group_global_mismatch_gv_null_constexpr() #0 {
-  store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* @lds0 to i32*), i32* addrspacecast (i32 addrspace(1)* null to i32*)), align 4
+  store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) @lds0 to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)), align 4
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_global_mismatch_null_gv_constexpr(
-; CHECK: store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* null to i32*), i32* addrspacecast (i32 addrspace(1)* @global0 to i32*)), align 4
+; CHECK: store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(1) @global0 to ptr)), align 4
 define amdgpu_kernel void @store_select_group_global_mismatch_null_gv_constexpr() #0 {
-  store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* null to i32*), i32* addrspacecast (i32 addrspace(1)* @global0 to i32*)), align 4
+  store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(1) @global0 to ptr)), align 4
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_global_mismatch_inttoptr_null_constexpr(
-; CHECK: store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* inttoptr (i64 123 to i32 addrspace(3)*) to i32*), i32* addrspacecast (i32 addrspace(1)* null to i32*)), align 4
+; CHECK: store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) inttoptr (i64 123 to ptr addrspace(3)) to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)), align 4
 define amdgpu_kernel void @store_select_group_global_mismatch_inttoptr_null_constexpr() #0 {
-  store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* inttoptr (i64 123 to i32 addrspace(3)*) to i32*), i32* addrspacecast (i32 addrspace(1)* null to i32*)), align 4
+  store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) inttoptr (i64 123 to ptr addrspace(3)) to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)), align 4
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_global_mismatch_inttoptr_flat_null_constexpr(
-; CHECK: store i32 7, i32 addrspace(1)* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32 addrspace(1)* addrspacecast (i32* inttoptr (i64 123 to i32*) to i32 addrspace(1)*), i32 addrspace(1)* null), align 4
+; CHECK: store i32 7, ptr addrspace(1) select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspace(1) addrspacecast (ptr inttoptr (i64 123 to ptr) to ptr addrspace(1)), ptr addrspace(1) null), align 4
 define amdgpu_kernel void @store_select_group_global_mismatch_inttoptr_flat_null_constexpr() #0 {
-  store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* inttoptr (i64 123 to i32*), i32* addrspacecast (i32 addrspace(1)* null to i32*)), align 4
+  store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr inttoptr (i64 123 to ptr), ptr addrspacecast (ptr addrspace(1) null to ptr)), align 4
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_global_mismatch_undef_undef_constexpr(
-; CHECK: store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* null to i32*), i32* undef), align 4
+; CHECK: store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) null to ptr), ptr undef), align 4
 define amdgpu_kernel void @store_select_group_global_mismatch_undef_undef_constexpr() #0 {
-  store i32 7, i32* select (i1 icmp eq (i32 ptrtoint (i32 addrspace(3)* @lds1 to i32), i32 4), i32* addrspacecast (i32 addrspace(3)* null to i32*), i32* addrspacecast (i32 addrspace(1)* undef to i32*)), align 4
+  store i32 7, ptr select (i1 icmp eq (i32 ptrtoint (ptr addrspace(3) @lds1 to i32), i32 4), ptr addrspacecast (ptr addrspace(3) null to ptr), ptr addrspacecast (ptr addrspace(1) undef to ptr)), align 4
   ret void
 }
 
 @lds2 = external addrspace(3) global [1024 x i32], align 4
 
 ; CHECK-LABEL: @store_select_group_constexpr_ptrtoint(
-; CHECK: %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-; CHECK: %select = select i1 %c, i32* %cast0, i32* addrspacecast (i32 addrspace(1)* inttoptr (i32 add (i32 ptrtoint ([1024 x i32] addrspace(3)* @lds2 to i32), i32 124) to i32 addrspace(1)*) to i32*)
-; CHECK: store i32 7, i32* %select
-define amdgpu_kernel void @store_select_group_constexpr_ptrtoint(i1 %c, i32 addrspace(3)* %group.ptr.0) #0 {
-  %cast0 = addrspacecast i32 addrspace(3)* %group.ptr.0 to i32*
-  %select = select i1 %c, i32* %cast0, i32* addrspacecast (i32 addrspace(1)* inttoptr (i32 add (i32 ptrtoint ([1024 x i32] addrspace(3)* @lds2 to i32), i32 124) to i32 addrspace(1)*) to i32*)
-  store i32 7, i32* %select
+; CHECK: %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+; CHECK: %select = select i1 %c, ptr %cast0, ptr addrspacecast (ptr addrspace(1) inttoptr (i32 add (i32 ptrtoint (ptr addrspace(3) @lds2 to i32), i32 124) to ptr addrspace(1)) to ptr)
+; CHECK: store i32 7, ptr %select
+define amdgpu_kernel void @store_select_group_constexpr_ptrtoint(i1 %c, ptr addrspace(3) %group.ptr.0) #0 {
+  %cast0 = addrspacecast ptr addrspace(3) %group.ptr.0 to ptr
+  %select = select i1 %c, ptr %cast0, ptr addrspacecast (ptr addrspace(1) inttoptr (i32 add (i32 ptrtoint (ptr addrspace(3) @lds2 to i32), i32 124) to ptr addrspace(1)) to ptr)
+  store i32 7, ptr %select
   ret void
 }
 
 ; CHECK-LABEL: @store_select_group_flat_vector(
-; CHECK: %cast0 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.0 to <2 x i32*>
-; CHECK: %cast1 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.1 to <2 x i32*>
-; CHECK: %select = select i1 %c, <2 x i32*> %cast0, <2 x i32*> %cast1
-; CHECK: %extract0 = extractelement <2 x i32*> %select, i32 0
-; CHECK: %extract1 = extractelement <2 x i32*> %select, i32 1
-; CHECK: store i32 -1, i32* %extract0
-; CHECK: store i32 -2, i32* %extract1
-define amdgpu_kernel void @store_select_group_flat_vector(i1 %c, <2 x i32 addrspace(3)*> %group.ptr.0, <2 x i32 addrspace(3)*> %group.ptr.1) #0 {
-  %cast0 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.0 to <2 x i32*>
-  %cast1 = addrspacecast <2 x i32 addrspace(3)*> %group.ptr.1 to <2 x i32*>
-  %select = select i1 %c, <2 x i32*> %cast0, <2 x i32*> %cast1
-  %extract0 = extractelement <2 x i32*> %select, i32 0
-  %extract1 = extractelement <2 x i32*> %select, i32 1
-  store i32 -1, i32* %extract0
-  store i32 -2, i32* %extract1
+; CHECK: %cast0 = addrspacecast <2 x ptr addrspace(3)> %group.ptr.0 to <2 x ptr>
+; CHECK: %cast1 = addrspacecast <2 x ptr addrspace(3)> %group.ptr.1 to <2 x ptr>
+; CHECK: %select = select i1 %c, <2 x ptr> %cast0, <2 x ptr> %cast1
+; CHECK: %extract0 = extractelement <2 x ptr> %select, i32 0
+; CHECK: %extract1 = extractelement <2 x ptr> %select, i32 1
+; CHECK: store i32 -1, ptr %extract0
+; CHECK: store i32 -2, ptr %extract1
+define amdgpu_kernel void @store_select_group_flat_vector(i1 %c, <2 x ptr addrspace(3)> %group.ptr.0, <2 x ptr addrspace(3)> %group.ptr.1) #0 {
+  %cast0 = addrspacecast <2 x ptr addrspace(3)> %group.ptr.0 to <2 x ptr>
+  %cast1 = addrspacecast <2 x ptr addrspace(3)> %group.ptr.1 to <2 x ptr>
+  %select = select i1 %c, <2 x ptr> %cast0, <2 x ptr> %cast1
+  %extract0 = extractelement <2 x ptr> %select, i32 0
+  %extract1 = extractelement <2 x ptr> %select, i32 1
+  store i32 -1, ptr %extract0
+  store i32 -2, ptr %extract1
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/self-phi.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/self-phi.ll
index 2f6496ab19944..f182e5c4e4e22 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/self-phi.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/self-phi.ll
@@ -1,25 +1,25 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -S -infer-address-spaces %s | FileCheck %s
 
-define amdgpu_kernel void @phi_self(i8 addrspace(1)* %arg) {
+define amdgpu_kernel void @phi_self(ptr addrspace(1) %arg) {
 ; CHECK-LABEL: @phi_self(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[I:%.*]] = phi i8 addrspace(1)* [ [[I]], [[LOOP]] ], [ [[ARG:%.*]], [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[I1:%.*]] = load i8, i8 addrspace(1)* [[I]], align 1
+; CHECK-NEXT:    [[I:%.*]] = phi ptr addrspace(1) [ [[I]], [[LOOP]] ], [ [[ARG:%.*]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[I1:%.*]] = load i8, ptr addrspace(1) [[I]], align 1
 ; CHECK-NEXT:    [[I2:%.*]] = icmp eq i8 [[I1]], 0
 ; CHECK-NEXT:    br i1 [[I2]], label [[LOOP]], label [[RET:%.*]]
 ; CHECK:       ret:
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %cast = addrspacecast i8 addrspace(1)* %arg to i8*
+  %cast = addrspacecast ptr addrspace(1) %arg to ptr
   br label %loop
 
 loop:
-  %i = phi i8* [%i, %loop], [%cast, %entry]
-  %i1 = load i8, i8* %i, align 1
+  %i = phi ptr [%i, %loop], [%cast, %entry]
+  %i1 = load i8, ptr %i, align 1
   %i2 = icmp eq i8 %i1, 0
   br i1 %i2, label %loop, label %ret
 

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/unreachable-code-assert.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/unreachable-code-assert.ll
index 73001b53634c0..f0c61bc5131bc 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/unreachable-code-assert.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/unreachable-code-assert.ll
@@ -6,8 +6,8 @@ define amdgpu_kernel void @subclass_data_assert() {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    unreachable
 ; CHECK:       strlen.while11:
-; CHECK-NEXT:    [[I:%.*]] = getelementptr i8, i8* [[I]], i64 1
-; CHECK-NEXT:    [[I1:%.*]] = load i8, i8* [[I]], align 1
+; CHECK-NEXT:    [[I:%.*]] = getelementptr i8, ptr [[I]], i64 1
+; CHECK-NEXT:    [[I1:%.*]] = load i8, ptr [[I]], align 1
 ; CHECK-NEXT:    [[I2:%.*]] = icmp eq i8 [[I1]], 0
 ; CHECK-NEXT:    br i1 [[I2]], label [[STRLEN_WHILE_DONE12:%.*]], label [[STRLEN_WHILE11:%.*]]
 ; CHECK:       strlen.while.done12:
@@ -17,8 +17,8 @@ entry:
   unreachable
 
 strlen.while11:                                   ; preds = %strlen.while11
-  %i = getelementptr i8, i8* %i, i64 1
-  %i1 = load i8, i8* %i, align 1
+  %i = getelementptr i8, ptr %i, i64 1
+  %i1 = load i8, ptr %i, align 1
   %i2 = icmp eq i8 %i1, 0
   br i1 %i2, label %strlen.while.done12, label %strlen.while11
 

diff  --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
index 3394fe639da3a..78c9801b31eaf 100644
--- a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/volatile.ll
@@ -3,137 +3,137 @@
 ; Check that volatile users of addrspacecast are not replaced.
 
 ; CHECK-LABEL: @volatile_load_flat_from_global(
-; CHECK: load volatile i32, i32*
-; CHECK: store i32 %val, i32 addrspace(1)*
-define amdgpu_kernel void @volatile_load_flat_from_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(1)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(1)* %output to i32*
-  %val = load volatile i32, i32* %tmp0, align 4
-  store i32 %val, i32* %tmp1, align 4
+; CHECK: load volatile i32, ptr
+; CHECK: store i32 %val, ptr addrspace(1)
+define amdgpu_kernel void @volatile_load_flat_from_global(ptr addrspace(1) nocapture %input, ptr addrspace(1) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(1) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(1) %output to ptr
+  %val = load volatile i32, ptr %tmp0, align 4
+  store i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @volatile_load_flat_from_constant(
-; CHECK: load volatile i32, i32*
-; CHECK: store i32 %val, i32 addrspace(1)*
-define amdgpu_kernel void @volatile_load_flat_from_constant(i32 addrspace(4)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(4)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(1)* %output to i32*
-  %val = load volatile i32, i32* %tmp0, align 4
-  store i32 %val, i32* %tmp1, align 4
+; CHECK: load volatile i32, ptr
+; CHECK: store i32 %val, ptr addrspace(1)
+define amdgpu_kernel void @volatile_load_flat_from_constant(ptr addrspace(4) nocapture %input, ptr addrspace(1) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(4) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(1) %output to ptr
+  %val = load volatile i32, ptr %tmp0, align 4
+  store i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @volatile_load_flat_from_group(
-; CHECK: load volatile i32, i32*
-; CHECK: store i32 %val, i32 addrspace(3)*
-define amdgpu_kernel void @volatile_load_flat_from_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(3)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(3)* %output to i32*
-  %val = load volatile i32, i32* %tmp0, align 4
-  store i32 %val, i32* %tmp1, align 4
+; CHECK: load volatile i32, ptr
+; CHECK: store i32 %val, ptr addrspace(3)
+define amdgpu_kernel void @volatile_load_flat_from_group(ptr addrspace(3) nocapture %input, ptr addrspace(3) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(3) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(3) %output to ptr
+  %val = load volatile i32, ptr %tmp0, align 4
+  store i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @volatile_load_flat_from_private(
-; CHECK: load volatile i32, i32*
-; CHECK: store i32 %val, i32 addrspace(5)*
-define amdgpu_kernel void @volatile_load_flat_from_private(i32 addrspace(5)* nocapture %input, i32 addrspace(5)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(5)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(5)* %output to i32*
-  %val = load volatile i32, i32* %tmp0, align 4
-  store i32 %val, i32* %tmp1, align 4
+; CHECK: load volatile i32, ptr
+; CHECK: store i32 %val, ptr addrspace(5)
+define amdgpu_kernel void @volatile_load_flat_from_private(ptr addrspace(5) nocapture %input, ptr addrspace(5) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(5) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(5) %output to ptr
+  %val = load volatile i32, ptr %tmp0, align 4
+  store i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @volatile_store_flat_to_global(
-; CHECK: load i32, i32 addrspace(1)*
-; CHECK: store volatile i32 %val, i32*
-define amdgpu_kernel void @volatile_store_flat_to_global(i32 addrspace(1)* nocapture %input, i32 addrspace(1)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(1)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(1)* %output to i32*
-  %val = load i32, i32* %tmp0, align 4
-  store volatile i32 %val, i32* %tmp1, align 4
+; CHECK: load i32, ptr addrspace(1)
+; CHECK: store volatile i32 %val, ptr
+define amdgpu_kernel void @volatile_store_flat_to_global(ptr addrspace(1) nocapture %input, ptr addrspace(1) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(1) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(1) %output to ptr
+  %val = load i32, ptr %tmp0, align 4
+  store volatile i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @volatile_store_flat_to_group(
-; CHECK: load i32, i32 addrspace(3)*
-; CHECK: store volatile i32 %val, i32*
-define amdgpu_kernel void @volatile_store_flat_to_group(i32 addrspace(3)* nocapture %input, i32 addrspace(3)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(3)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(3)* %output to i32*
-  %val = load i32, i32* %tmp0, align 4
-  store volatile i32 %val, i32* %tmp1, align 4
+; CHECK: load i32, ptr addrspace(3)
+; CHECK: store volatile i32 %val, ptr
+define amdgpu_kernel void @volatile_store_flat_to_group(ptr addrspace(3) nocapture %input, ptr addrspace(3) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(3) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(3) %output to ptr
+  %val = load i32, ptr %tmp0, align 4
+  store volatile i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @volatile_store_flat_to_private(
-; CHECK: load i32, i32 addrspace(5)*
-; CHECK: store volatile i32 %val, i32*
-define amdgpu_kernel void @volatile_store_flat_to_private(i32 addrspace(5)* nocapture %input, i32 addrspace(5)* nocapture %output) #0 {
-  %tmp0 = addrspacecast i32 addrspace(5)* %input to i32*
-  %tmp1 = addrspacecast i32 addrspace(5)* %output to i32*
-  %val = load i32, i32* %tmp0, align 4
-  store volatile i32 %val, i32* %tmp1, align 4
+; CHECK: load i32, ptr addrspace(5)
+; CHECK: store volatile i32 %val, ptr
+define amdgpu_kernel void @volatile_store_flat_to_private(ptr addrspace(5) nocapture %input, ptr addrspace(5) nocapture %output) #0 {
+  %tmp0 = addrspacecast ptr addrspace(5) %input to ptr
+  %tmp1 = addrspacecast ptr addrspace(5) %output to ptr
+  %val = load i32, ptr %tmp0, align 4
+  store volatile i32 %val, ptr %tmp1, align 4
   ret void
 }
 
 ; CHECK-LABEL: @volatile_atomicrmw_add_group_to_flat(
-; CHECK: addrspacecast i32 addrspace(3)* %group.ptr to i32*
-; CHECK: atomicrmw volatile add i32*
-define i32 @volatile_atomicrmw_add_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %y) #0 {
-  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32*
-  %ret = atomicrmw volatile add i32* %cast, i32 %y seq_cst
+; CHECK: addrspacecast ptr addrspace(3) %group.ptr to ptr
+; CHECK: atomicrmw volatile add ptr
+define i32 @volatile_atomicrmw_add_group_to_flat(ptr addrspace(3) %group.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = atomicrmw volatile add ptr %cast, i32 %y seq_cst
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @volatile_atomicrmw_add_global_to_flat(
-; CHECK: addrspacecast i32 addrspace(1)* %global.ptr to i32*
-; CHECK: %ret = atomicrmw volatile add i32*
-define i32 @volatile_atomicrmw_add_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %y) #0 {
-  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32*
-  %ret = atomicrmw volatile add i32* %cast, i32 %y seq_cst
+; CHECK: addrspacecast ptr addrspace(1) %global.ptr to ptr
+; CHECK: %ret = atomicrmw volatile add ptr
+define i32 @volatile_atomicrmw_add_global_to_flat(ptr addrspace(1) %global.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = atomicrmw volatile add ptr %cast, i32 %y seq_cst
   ret i32 %ret
 }
 
 ; CHECK-LABEL: @volatile_cmpxchg_global_to_flat(
-; CHECK: addrspacecast i32 addrspace(1)* %global.ptr to i32*
-; CHECK: cmpxchg volatile i32*
-define { i32, i1 } @volatile_cmpxchg_global_to_flat(i32 addrspace(1)* %global.ptr, i32 %cmp, i32 %val) #0 {
-  %cast = addrspacecast i32 addrspace(1)* %global.ptr to i32*
-  %ret = cmpxchg volatile i32* %cast, i32 %cmp, i32 %val seq_cst monotonic
+; CHECK: addrspacecast ptr addrspace(1) %global.ptr to ptr
+; CHECK: cmpxchg volatile ptr
+define { i32, i1 } @volatile_cmpxchg_global_to_flat(ptr addrspace(1) %global.ptr, i32 %cmp, i32 %val) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  %ret = cmpxchg volatile ptr %cast, i32 %cmp, i32 %val seq_cst monotonic
   ret { i32, i1 } %ret
 }
 
 ; CHECK-LABEL: @volatile_cmpxchg_group_to_flat(
-; CHECK: addrspacecast i32 addrspace(3)* %group.ptr to i32*
-; CHECK: cmpxchg volatile i32*
-define { i32, i1 } @volatile_cmpxchg_group_to_flat(i32 addrspace(3)* %group.ptr, i32 %cmp, i32 %val) #0 {
-  %cast = addrspacecast i32 addrspace(3)* %group.ptr to i32*
-  %ret = cmpxchg volatile i32* %cast, i32 %cmp, i32 %val seq_cst monotonic
+; CHECK: addrspacecast ptr addrspace(3) %group.ptr to ptr
+; CHECK: cmpxchg volatile ptr
+define { i32, i1 } @volatile_cmpxchg_group_to_flat(ptr addrspace(3) %group.ptr, i32 %cmp, i32 %val) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  %ret = cmpxchg volatile ptr %cast, i32 %cmp, i32 %val seq_cst monotonic
   ret { i32, i1 } %ret
 }
 
 ; CHECK-LABEL: @volatile_memset_group_to_flat(
-; CHECK: %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 32, i1 true)
-define amdgpu_kernel void @volatile_memset_group_to_flat(i8 addrspace(3)* %group.ptr, i32 %y) #0 {
-  %cast = addrspacecast i8 addrspace(3)* %group.ptr to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 32, i1 true)
+; CHECK: %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+; CHECK: call void @llvm.memset.p0.i64(ptr align 4 %cast, i8 4, i64 32, i1 true)
+define amdgpu_kernel void @volatile_memset_group_to_flat(ptr addrspace(3) %group.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  call void @llvm.memset.p0.i64(ptr align 4 %cast, i8 4, i64 32, i1 true)
   ret void
 }
 
 ; CHECK-LABEL: @volatile_memset_global_to_flat(
-; CHECK: %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8*
-; CHECK: call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 32, i1 true)
-define amdgpu_kernel void @volatile_memset_global_to_flat(i8 addrspace(1)* %global.ptr, i32 %y) #0 {
-  %cast = addrspacecast i8 addrspace(1)* %global.ptr to i8*
-  call void @llvm.memset.p0i8.i64(i8* align 4 %cast, i8 4, i64 32, i1 true)
+; CHECK: %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+; CHECK: call void @llvm.memset.p0.i64(ptr align 4 %cast, i8 4, i64 32, i1 true)
+define amdgpu_kernel void @volatile_memset_global_to_flat(ptr addrspace(1) %global.ptr, i32 %y) #0 {
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  call void @llvm.memset.p0.i64(ptr align 4 %cast, i8 4, i64 32, i1 true)
   ret void
 }
 
-declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind }

diff  --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll
index b4e05b2e429e7..a3ca70f27180e 100644
--- a/llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll
@@ -2,22 +2,22 @@
 
 target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
 
-%struct.bar = type { float, float* }
+%struct.bar = type { float, ptr }
 
 @var1 = local_unnamed_addr addrspace(3) externally_initialized global %struct.bar undef, align 8
 
 ; CHECK-LABEL: @bug31948(
-; CHECK: %tmp = load float*, float* addrspace(3)* getelementptr inbounds (%struct.bar, %struct.bar addrspace(3)* @var1, i64 0, i32 1), align 8
-; CHECK: %tmp1 = load float, float* %tmp, align 4
-; CHECK: store float %conv1, float* %tmp, align 4
-; CHECK: store i32 32, i32 addrspace(3)* bitcast (float* addrspace(3)* getelementptr inbounds (%struct.bar, %struct.bar addrspace(3)* @var1, i64 0, i32 1) to i32 addrspace(3)*), align 4
-define void @bug31948(float %a, float* nocapture readnone %x, float* nocapture readnone %y) local_unnamed_addr #0 {
+; CHECK: %tmp = load ptr, ptr addrspace(3) getelementptr inbounds (%struct.bar, ptr addrspace(3) @var1, i64 0, i32 1), align 8
+; CHECK: %tmp1 = load float, ptr %tmp, align 4
+; CHECK: store float %conv1, ptr %tmp, align 4
+; CHECK: store i32 32, ptr addrspace(3) getelementptr inbounds (%struct.bar, ptr addrspace(3) @var1, i64 0, i32 1), align 4
+define void @bug31948(float %a, ptr nocapture readnone %x, ptr nocapture readnone %y) local_unnamed_addr #0 {
 entry:
-  %tmp = load float*, float** getelementptr (%struct.bar, %struct.bar* addrspacecast (%struct.bar addrspace(3)* @var1 to %struct.bar*), i64 0, i32 1), align 8
-  %tmp1 = load float, float* %tmp, align 4
+  %tmp = load ptr, ptr getelementptr (%struct.bar, ptr addrspacecast (ptr addrspace(3) @var1 to ptr), i64 0, i32 1), align 8
+  %tmp1 = load float, ptr %tmp, align 4
   %conv1 = fadd float %tmp1, 1.000000e+00
-  store float %conv1, float* %tmp, align 4
-  store i32 32, i32* bitcast (float** getelementptr (%struct.bar, %struct.bar* addrspacecast (%struct.bar addrspace(3)* @var1 to %struct.bar*), i64 0, i32 1) to i32*), align 4
+  store float %conv1, ptr %tmp, align 4
+  store i32 32, ptr bitcast (ptr getelementptr (%struct.bar, ptr addrspacecast (ptr addrspace(3) @var1 to ptr), i64 0, i32 1) to ptr), align 4
   ret void
 }
 

diff  --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/builtin-assumed-addrspace.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/builtin-assumed-addrspace.ll
index 5985823ef4482..d99a9c1cdef93 100644
--- a/llvm/test/Transforms/InferAddressSpaces/NVPTX/builtin-assumed-addrspace.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/builtin-assumed-addrspace.ll
@@ -1,107 +1,102 @@
 ; RUN: opt -S -mtriple=nvptx64-nvidia-cuda -infer-address-spaces -o - %s | FileCheck %s
 
 ; CHECK-LABEL: @f0
-; CHECK: addrspacecast float* {{%.*}} to float addrspace(4)*
-; CHECK: getelementptr inbounds float, float addrspace(4)*
-; CHECK: load float, float addrspace(4)*
-define float @f0(float* %p) {
+; CHECK: addrspacecast ptr {{%.*}} to ptr addrspace(4)
+; CHECK: getelementptr inbounds float, ptr addrspace(4)
+; CHECK: load float, ptr addrspace(4)
+define float @f0(ptr %p) {
 entry:
-  %0 = bitcast float* %p to i8*
-  %1 = call i1 @llvm.nvvm.isspacep.const(i8* %0)
-  tail call void @llvm.assume(i1 %1)
-  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-  %idxprom = zext i32 %2 to i64
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
-  %3 = load float, float* %arrayidx, align 4
-  ret float %3
+  %0 = call i1 @llvm.nvvm.isspacep.const(ptr %p)
+  tail call void @llvm.assume(i1 %0)
+  %1 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %1 to i64
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %2 = load float, ptr %arrayidx, align 4
+  ret float %2
 }
 
 ; CHECK-LABEL: @f1
-; CHECK: addrspacecast float* {{%.*}} to float addrspace(1)*
-; CHECK: getelementptr inbounds float, float addrspace(1)*
-; CHECK: load float, float addrspace(1)*
-define float @f1(float* %p) {
+; CHECK: addrspacecast ptr {{%.*}} to ptr addrspace(1)
+; CHECK: getelementptr inbounds float, ptr addrspace(1)
+; CHECK: load float, ptr addrspace(1)
+define float @f1(ptr %p) {
 entry:
-  %0 = bitcast float* %p to i8*
-  %1 = call i1 @llvm.nvvm.isspacep.global(i8* %0)
-  tail call void @llvm.assume(i1 %1)
-  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-  %idxprom = zext i32 %2 to i64
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
-  %3 = load float, float* %arrayidx, align 4
-  ret float %3
+  %0 = call i1 @llvm.nvvm.isspacep.global(ptr %p)
+  tail call void @llvm.assume(i1 %0)
+  %1 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %1 to i64
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %2 = load float, ptr %arrayidx, align 4
+  ret float %2
 }
 
 ; CHECK-LABEL: @f2
-; CHECK: addrspacecast float* {{%.*}} to float addrspace(5)*
-; CHECK: getelementptr inbounds float, float addrspace(5)*
-; CHECK: load float, float addrspace(5)*
-define float @f2(float* %p) {
+; CHECK: addrspacecast ptr {{%.*}} to ptr addrspace(5)
+; CHECK: getelementptr inbounds float, ptr addrspace(5)
+; CHECK: load float, ptr addrspace(5)
+define float @f2(ptr %p) {
 entry:
-  %0 = bitcast float* %p to i8*
-  %1 = call i1 @llvm.nvvm.isspacep.local(i8* %0)
-  tail call void @llvm.assume(i1 %1)
-  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-  %idxprom = zext i32 %2 to i64
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
-  %3 = load float, float* %arrayidx, align 4
-  ret float %3
+  %0 = call i1 @llvm.nvvm.isspacep.local(ptr %p)
+  tail call void @llvm.assume(i1 %0)
+  %1 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %1 to i64
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %2 = load float, ptr %arrayidx, align 4
+  ret float %2
 }
 
 ; CHECK-LABEL: @f3
-; CHECK: addrspacecast float* {{%.*}} to float addrspace(3)*
-; CHECK: getelementptr inbounds float, float addrspace(3)*
-; CHECK: load float, float addrspace(3)*
-define float @f3(float* %p) {
+; CHECK: addrspacecast ptr {{%.*}} to ptr addrspace(3)
+; CHECK: getelementptr inbounds float, ptr addrspace(3)
+; CHECK: load float, ptr addrspace(3)
+define float @f3(ptr %p) {
 entry:
-  %0 = bitcast float* %p to i8*
-  %1 = call i1 @llvm.nvvm.isspacep.shared(i8* %0)
-  tail call void @llvm.assume(i1 %1)
-  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-  %idxprom = zext i32 %2 to i64
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
-  %3 = load float, float* %arrayidx, align 4
-  ret float %3
+  %0 = call i1 @llvm.nvvm.isspacep.shared(ptr %p)
+  tail call void @llvm.assume(i1 %0)
+  %1 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %1 to i64
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %2 = load float, ptr %arrayidx, align 4
+  ret float %2
 }
 
 ; CHECK-LABEL: @g0
 ; CHECK: if.then:
-; CHECK: addrspacecast float* {{%.*}} to float addrspace(3)*
-; CHECK: getelementptr inbounds float, float addrspace(3)*
-; CHECK: load float, float addrspace(3)*
+; CHECK: addrspacecast ptr {{%.*}} to ptr addrspace(3)
+; CHECK: getelementptr inbounds float, ptr addrspace(3)
+; CHECK: load float, ptr addrspace(3)
 ; CHECK: if.end:
-; CHECK: getelementptr inbounds float, float*
-; CHECK: load float, float*
-define float @g0(i32 %c, float* %p) {
+; CHECK: getelementptr inbounds float, ptr
+; CHECK: load float, ptr
+define float @g0(i32 %c, ptr %p) {
 entry:
   %tobool.not = icmp eq i32 %c, 0
   br i1 %tobool.not, label %if.end, label %if.then
 
 if.then:
-  %0 = bitcast float* %p to i8*
-  %1 = call i1 @llvm.nvvm.isspacep.shared(i8* %0)
-  tail call void @llvm.assume(i1 %1)
-  %2 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-  %idxprom = zext i32 %2 to i64
-  %arrayidx = getelementptr inbounds float, float* %p, i64 %idxprom
-  %3 = load float, float* %arrayidx, align 4
-  %add = fadd float %3, 0.
+  %0 = call i1 @llvm.nvvm.isspacep.shared(ptr %p)
+  tail call void @llvm.assume(i1 %0)
+  %1 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  %idxprom = zext i32 %1 to i64
+  %arrayidx = getelementptr inbounds float, ptr %p, i64 %idxprom
+  %2 = load float, ptr %arrayidx, align 4
+  %add = fadd float %2, 0.
   br label %if.end
 
 if.end:
   %s = phi float [ %add, %if.then ], [ 0., %entry ]
-  %4 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.y()
-  %idxprom2 = zext i32 %4 to i64
-  %arrayidx2 = getelementptr inbounds float, float* %p, i64 %idxprom2
-  %5 = load float, float* %arrayidx2, align 4
-  %add2 = fadd float %s, %5
+  %3 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.y()
+  %idxprom2 = zext i32 %3 to i64
+  %arrayidx2 = getelementptr inbounds float, ptr %p, i64 %idxprom2
+  %4 = load float, ptr %arrayidx2, align 4
+  %add2 = fadd float %s, %4
   ret float %add2
 }
 
 declare void @llvm.assume(i1)
-declare i1 @llvm.nvvm.isspacep.const(i8*)
-declare i1 @llvm.nvvm.isspacep.global(i8*)
-declare i1 @llvm.nvvm.isspacep.local(i8*)
-declare i1 @llvm.nvvm.isspacep.shared(i8*)
+declare i1 @llvm.nvvm.isspacep.const(ptr)
+declare i1 @llvm.nvvm.isspacep.global(ptr)
+declare i1 @llvm.nvvm.isspacep.local(ptr)
+declare i1 @llvm.nvvm.isspacep.shared(ptr)
 declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
 declare i32 @llvm.nvvm.read.ptx.sreg.tid.y()

diff  --git a/llvm/test/Transforms/InferAddressSpaces/NVPTX/clone_constexpr.ll b/llvm/test/Transforms/InferAddressSpaces/NVPTX/clone_constexpr.ll
index 15fa984541df2..e76daeb6f745f 100644
--- a/llvm/test/Transforms/InferAddressSpaces/NVPTX/clone_constexpr.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/NVPTX/clone_constexpr.ll
@@ -12,62 +12,58 @@ define void @foo() local_unnamed_addr #0 {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[X0:%.*]] = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x() #[[ATTR2:[0-9]+]]
 ; CHECK-NEXT:    [[IDXPROM_I:%.*]] = zext i32 [[X0]] to i64
-; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr [[STRUCT_S:%.*]], %struct.S* addrspacecast ([[STRUCT_S]] addrspace(3)* @g1 to %struct.S*), i64 0, i32 0, i64 [[IDXPROM_I]]
-; CHECK-NEXT:    tail call void @f1(i32* [[ARRAYIDX_I]], i32 undef) #[[ATTR0:[0-9]+]]
-; CHECK-NEXT:    [[X1:%.*]] = load i32, i32 addrspace(3)* getelementptr inbounds ([[STRUCT_S]], [[STRUCT_S]] addrspace(3)* @g1, i64 0, i32 0, i64 0), align 4
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr [[STRUCT_S:%.*]], ptr addrspacecast (ptr addrspace(3) @g1 to ptr), i64 0, i32 0, i64 [[IDXPROM_I]]
+; CHECK-NEXT:    tail call void @f1(ptr [[ARRAYIDX_I]], i32 undef) #[[ATTR0:[0-9]+]]
+; CHECK-NEXT:    [[X1:%.*]] = load i32, ptr addrspace(3) @g1, align 4
 ; CHECK-NEXT:    [[L_SROA_0_0_INSERT_EXT_I:%.*]] = zext i32 [[X1]] to i64
-; CHECK-NEXT:    tail call void @f2(i64* null, i64 [[L_SROA_0_0_INSERT_EXT_I]]) #[[ATTR0]]
+; CHECK-NEXT:    tail call void @f2(ptr null, i64 [[L_SROA_0_0_INSERT_EXT_I]]) #[[ATTR0]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
   %x0 = tail call i32 @llvm.nvvm.read.ptx.sreg.tid.x() #2
   %idxprom.i = zext i32 %x0 to i64
-  %arrayidx.i = getelementptr %struct.S, %struct.S* addrspacecast (%struct.S addrspace(3)* @g1 to %struct.S*), i64 0, i32 0, i64 %idxprom.i
-  tail call void @f1(i32* %arrayidx.i, i32 undef) #0
-  %x1 = load i32, i32* getelementptr (%struct.S, %struct.S* addrspacecast (%struct.S addrspace(3)* @g1 to %struct.S*), i64 0, i32 0, i64 0), align 4
+  %arrayidx.i = getelementptr %struct.S, ptr addrspacecast (ptr addrspace(3) @g1 to ptr), i64 0, i32 0, i64 %idxprom.i
+  tail call void @f1(ptr %arrayidx.i, i32 undef) #0
+  %x1 = load i32, ptr addrspacecast (ptr addrspace(3) @g1 to ptr), align 4
   %L.sroa.0.0.insert.ext.i = zext i32 %x1 to i64
-  tail call void @f2(i64* null, i64 %L.sroa.0.0.insert.ext.i) #0
+  tail call void @f2(ptr null, i64 %L.sroa.0.0.insert.ext.i) #0
   ret void
 }
 
-declare void @f1(i32*, i32) local_unnamed_addr #0
-declare void @f2(i64*, i64) local_unnamed_addr #0
+declare void @f1(ptr, i32) local_unnamed_addr #0
+declare void @f2(ptr, i64) local_unnamed_addr #0
 declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() #1
 
 ; Make sure we can clone GEP which uses complex constant expressions as indices.
 ; https://bugs.llvm.org/show_bug.cgi?id=51099
 @g2 = internal addrspace(3) global [128 x i8] undef, align 1
 
-define float @complex_ce(i8* nocapture readnone %a, i8* nocapture readnone %b, i8* nocapture readnone %c) local_unnamed_addr #0 {
+define float @complex_ce(ptr nocapture readnone %a, ptr nocapture readnone %b, ptr nocapture readnone %c) local_unnamed_addr #0 {
 ; CHECK-LABEL: @complex_ce(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load float, float addrspace(3)* bitcast (i8 addrspace(3)* getelementptr (i8, i8 addrspace(3)* getelementptr inbounds ([128 x i8], [128 x i8] addrspace(3)* @g2, i64 0, i64 0), i64 sub (i64 ptrtoint (i8 addrspace(3)* getelementptr inbounds ([128 x i8], [128 x i8] addrspace(3)* @g2, i64 0, i64 123) to i64), i64 ptrtoint (i8 addrspace(3)* getelementptr inbounds ([128 x i8], [128 x i8] addrspace(3)* @g2, i64 2, i64 0) to i64))) to float addrspace(3)*), align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr addrspace(3) getelementptr (i8, ptr addrspace(3) @g2, i64 sub (i64 ptrtoint (ptr addrspace(3) getelementptr inbounds ([128 x i8], ptr addrspace(3) @g2, i64 0, i64 123) to i64), i64 ptrtoint (ptr addrspace(3) getelementptr inbounds ([128 x i8], ptr addrspace(3) @g2, i64 2, i64 0) to i64))), align 4
 ; CHECK-NEXT:    ret float [[TMP0]]
 ;
 entry:
-  %0 = load float, float* bitcast (
-       i8* getelementptr (
-         i8, i8* getelementptr inbounds (
-           [128 x i8],
-           [128 x i8]* addrspacecast ([128 x i8] addrspace(3)* @g2 to [128 x i8]*),
-           i64 0,
-           i64 0),
+  %0 = load float, ptr bitcast (
+       ptr getelementptr (
+         i8, ptr addrspacecast (ptr addrspace(3) @g2 to ptr),
          i64 sub (
            i64 ptrtoint (
-             i8* getelementptr inbounds (
+             ptr getelementptr inbounds (
                [128 x i8],
-               [128 x i8]* addrspacecast ([128 x i8] addrspace(3)* @g2 to [128 x i8]*),
+               ptr addrspacecast (ptr addrspace(3) @g2 to ptr),
                i64 0,
                i64 123)
              to i64),
            i64 ptrtoint (
-             i8* getelementptr inbounds (
+             ptr getelementptr inbounds (
                [128 x i8],
-               [128 x i8]* addrspacecast ([128 x i8] addrspace(3)* @g2 to [128 x i8]*),
+               ptr addrspacecast (ptr addrspace(3) @g2 to ptr),
                i64 2,
                i64 0)
              to i64)))
-        to float*), align 4
+        to ptr), align 4
   ret float %0
 }
 

diff  --git a/llvm/test/Transforms/InferAddressSpaces/X86/noop-ptrint-pair.ll b/llvm/test/Transforms/InferAddressSpaces/X86/noop-ptrint-pair.ll
index 541264db05087..5ef7b0cdd401f 100644
--- a/llvm/test/Transforms/InferAddressSpaces/X86/noop-ptrint-pair.ll
+++ b/llvm/test/Transforms/InferAddressSpaces/X86/noop-ptrint-pair.ll
@@ -5,12 +5,12 @@
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 
 ; CHECK-LABEL: @noop_ptrint_pair(
-; CHECK: addrspacecast i32 addrspace(1)* %x to i32 addrspace(4)*
-; CHECK-NEXT: ptrtoint i32 addrspace(4)* %{{.*}} to i64
-; CHECK-NEXT: inttoptr i64 %{{.*}} to i32 addrspace(4)*
-define void @noop_ptrint_pair(i32 addrspace(1)* %x) {
-  %1 = addrspacecast i32 addrspace(1)* %x to i32 addrspace(4)*
-  %2 = ptrtoint i32 addrspace(4)* %1 to i64
-  %3 = inttoptr i64 %2 to i32 addrspace(4)*
+; CHECK: addrspacecast ptr addrspace(1) %x to ptr addrspace(4)
+; CHECK-NEXT: ptrtoint ptr addrspace(4) %{{.*}} to i64
+; CHECK-NEXT: inttoptr i64 %{{.*}} to ptr addrspace(4)
+define void @noop_ptrint_pair(ptr addrspace(1) %x) {
+  %1 = addrspacecast ptr addrspace(1) %x to ptr addrspace(4)
+  %2 = ptrtoint ptr addrspace(4) %1 to i64
+  %3 = inttoptr i64 %2 to ptr addrspace(4)
   ret void
 }


        

