[llvm] 8c58a9a - DivergenceAnalysis: Convert tests to opaque pointers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 28 05:55:14 PST 2022


Author: Matt Arsenault
Date: 2022-11-28T08:42:38-05:00
New Revision: 8c58a9ace0a157ffc14e3745546cab4869e38aa9

URL: https://github.com/llvm/llvm-project/commit/8c58a9ace0a157ffc14e3745546cab4869e38aa9
DIFF: https://github.com/llvm/llvm-project/commit/8c58a9ace0a157ffc14e3745546cab4869e38aa9.diff

LOG: DivergenceAnalysis: Convert tests to opaque pointers
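
The conversion replaces typed pointers in the tests with the opaque ptr type. As a
minimal sketch of the pattern applied throughout the diff below:

  ; before: typed pointer
  store i32 %val, i32 addrspace(1)* %ptr

  ; after: opaque pointer
  store i32 %val, ptr addrspace(1) %ptr

Intrinsics whose names were mangled on the pointee type drop that suffix, since
opaque pointers are mangled only by address space; e.g.
@llvm.amdgcn.atomic.inc.i32.p1i32 becomes @llvm.amdgcn.atomic.inc.i32.p1.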

Added: 
    

Modified: 
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/always_uniform.ll
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/b42473-r1-crash.ll
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/control-flow-intrinsics.ll
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/inline-asm.ll
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
    llvm/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
    llvm/test/Analysis/DivergenceAnalysis/NVPTX/daorder.ll
    llvm/test/Analysis/DivergenceAnalysis/NVPTX/diverge.ll
    llvm/test/Analysis/DivergenceAnalysis/NVPTX/hidden_diverge.ll
    llvm/test/Analysis/DivergenceAnalysis/NVPTX/irreducible.ll
    llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/atomics.ll
    llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/intrinsics.ll
    llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/kernel-args.ll
    llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/loads.ll
    llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/no-return-blocks.ll
    llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
    llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
    llvm/test/Analysis/LegacyDivergenceAnalysis/NVPTX/diverge.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/always_uniform.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/always_uniform.ll
index 1c7f0d7053d3e..ec4ccbd281a42 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/always_uniform.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/always_uniform.ll
@@ -47,8 +47,8 @@ define void @asm_mixed_sgpr_vgpr(i32 %divergent) {
   %asm = call { i32, i32 } asm "; def $0, $1, $2","=s,=v,v"(i32 %divergent)
   %sgpr = extractvalue { i32, i32 } %asm, 0
   %vgpr = extractvalue { i32, i32 } %asm, 1
-  store i32 %sgpr, i32 addrspace(1)* undef
-  store i32 %vgpr, i32 addrspace(1)* undef
+  store i32 %sgpr, ptr addrspace(1) undef
+  store i32 %vgpr, ptr addrspace(1) undef
   ret void
 }
 

diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
index 12d7fba69f487..123d9b48abeca 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/atomics.ll
@@ -1,57 +1,57 @@
 ; RUN: opt -mtriple amdgcn-- -passes='print<divergence>' -disable-output %s 2>&1 | FileCheck %s
 
-; CHECK: DIVERGENT: %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
-define amdgpu_kernel void @test1(i32* %ptr, i32 %val) #0 {
-  %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
-  store i32 %orig, i32* %ptr
+; CHECK: DIVERGENT: %orig = atomicrmw xchg ptr %ptr, i32 %val seq_cst
+define amdgpu_kernel void @test1(ptr %ptr, i32 %val) #0 {
+  %orig = atomicrmw xchg ptr %ptr, i32 %val seq_cst
+  store i32 %orig, ptr %ptr
   ret void
 }
 
-; CHECK: DIVERGENT: %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-define amdgpu_kernel void @test2(i32* %ptr, i32 %cmp, i32 %new) {
-  %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+; CHECK: DIVERGENT: %orig = cmpxchg ptr %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+define amdgpu_kernel void @test2(ptr %ptr, i32 %cmp, i32 %new) {
+  %orig = cmpxchg ptr %ptr, i32 %cmp, i32 %new seq_cst seq_cst
   %val = extractvalue { i32, i1 } %orig, 0
-  store i32 %val, i32* %ptr
+  store i32 %val, ptr %ptr
   ret void
 }
 
-; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
-define i32 @test_atomic_inc_i32(i32 addrspace(1)* %ptr, i32 %val) #0 {
-  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
+; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @test_atomic_inc_i32(ptr addrspace(1) %ptr, i32 %val) #0 {
+  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %ptr, i32 %val, i32 0, i32 0, i1 false)
   ret i32 %ret
 }
 
-; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
-define i64 @test_atomic_inc_i64(i64 addrspace(1)* %ptr, i64 %val) #0 {
-  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
+; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) %ptr, i64 %val, i32 0, i32 0, i1 false)
+define i64 @test_atomic_inc_i64(ptr addrspace(1) %ptr, i64 %val) #0 {
+  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) %ptr, i64 %val, i32 0, i32 0, i1 false)
   ret i64 %ret
 }
 
-; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
-define i32 @test_atomic_dec_i32(i32 addrspace(1)* %ptr, i32 %val) #0 {
-  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
+; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @test_atomic_dec_i32(ptr addrspace(1) %ptr, i32 %val) #0 {
+  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %ptr, i32 %val, i32 0, i32 0, i1 false)
   ret i32 %ret
 }
 
-; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
-define i64 @test_atomic_dec_i64(i64 addrspace(1)* %ptr, i64 %val) #0 {
-  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
+; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) %ptr, i64 %val, i32 0, i32 0, i1 false)
+define i64 @test_atomic_dec_i64(ptr addrspace(1) %ptr, i64 %val) #0 {
+  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) %ptr, i64 %val, i32 0, i32 0, i1 false)
   ret i64 %ret
 }
 
-declare i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #1
-declare i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #1
-declare i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #1
-declare i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #1
+declare i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) nocapture, i32, i32, i32, i1) #1
+declare i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) nocapture, i64, i32, i32, i1) #1
+declare i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) nocapture, i32, i32, i32, i1) #1
+declare i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) nocapture, i64, i32, i32, i1) #1
 
-; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1i32(i32 addrspace(1)* %ptr, i32 %val)
-define amdgpu_kernel void @test_atomic_csub_i32(i32 addrspace(1)* %ptr, i32 %val) #0 {
-  %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1i32(i32 addrspace(1)* %ptr, i32 %val)
-  store i32 %ret, i32 addrspace(1)* %ptr, align 4
+; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %val)
+define amdgpu_kernel void @test_atomic_csub_i32(ptr addrspace(1) %ptr, i32 %val) #0 {
+  %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %val)
+  store i32 %ret, ptr addrspace(1) %ptr, align 4
   ret void
 }
 
-declare i32 @llvm.amdgcn.global.atomic.csub.p1i32(i32 addrspace(1)* nocapture, i32) #1
+declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) nocapture, i32) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind willreturn }

diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/b42473-r1-crash.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/b42473-r1-crash.ll
index 4c161234a27cf..f43f5f7d23df8 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/b42473-r1-crash.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/b42473-r1-crash.ll
@@ -3,10 +3,10 @@
 declare i32 @gf2(i32)
 declare i32 @gf1(i32)
 
-define  void @tw1(i32 addrspace(4)* noalias nocapture readonly %A, i32 addrspace(4)* noalias nocapture %B) local_unnamed_addr #2 {
+define  void @tw1(ptr addrspace(4) noalias nocapture readonly %A, ptr addrspace(4) noalias nocapture %B) local_unnamed_addr #2 {
 ; CHECK: Divergence Analysis' for function 'tw1':
-; CHECK: DIVERGENT: i32 addrspace(4)* %A
-; CHECK: DIVERGENT: i32 addrspace(4)* %B
+; CHECK: DIVERGENT: ptr addrspace(4) %A
+; CHECK: DIVERGENT: ptr addrspace(4) %B
 entry:
 ; CHECK: DIVERGENT:       %call = tail call i32 @gf2(i32 0) #0
 ; CHECK: DIVERGENT:       %cmp = icmp ult i32 %call, 16
@@ -17,66 +17,66 @@ entry:
 
 if.then:
 ; CHECK: DIVERGENT:       %call1 = tail call i32 @gf1(i32 0) #0
-; CHECK: DIVERGENT:       %arrayidx = getelementptr inbounds i32, i32 addrspace(4)* %A, i32 %call1
-; CHECK: DIVERGENT:       %0 = load i32, i32 addrspace(4)* %arrayidx, align 4
+; CHECK: DIVERGENT:       %arrayidx = getelementptr inbounds i32, ptr addrspace(4) %A, i32 %call1
+; CHECK: DIVERGENT:       %0 = load i32, ptr addrspace(4) %arrayidx, align 4
 ; CHECK: DIVERGENT:       %cmp225 = icmp sgt i32 %0, 0
-; CHECK: DIVERGENT:       %arrayidx10 = getelementptr inbounds i32, i32 addrspace(4)* %B, i32 %call1
+; CHECK: DIVERGENT:       %arrayidx10 = getelementptr inbounds i32, ptr addrspace(4) %B, i32 %call1
 ; CHECK: DIVERGENT:       br i1 %cmp225, label %while.body.preheader, label %if.then.while.end_crit_edge
   %call1 = tail call  i32 @gf1(i32 0) #4
-  %arrayidx = getelementptr inbounds i32, i32 addrspace(4)* %A, i32 %call1
-  %0 = load i32, i32 addrspace(4)* %arrayidx, align 4
+  %arrayidx = getelementptr inbounds i32, ptr addrspace(4) %A, i32 %call1
+  %0 = load i32, ptr addrspace(4) %arrayidx, align 4
   %cmp225 = icmp sgt i32 %0, 0
-  %arrayidx10 = getelementptr inbounds i32, i32 addrspace(4)* %B, i32 %call1
+  %arrayidx10 = getelementptr inbounds i32, ptr addrspace(4) %B, i32 %call1
   br i1 %cmp225, label %while.body.preheader, label %if.then.while.end_crit_edge
 
 while.body.preheader:
   br label %while.body
 
 if.then.while.end_crit_edge:
-; CHECK: DIVERGENT:       %.pre = load i32, i32 addrspace(4)* %arrayidx10, align 4
-  %.pre = load i32, i32 addrspace(4)* %arrayidx10, align 4
+; CHECK: DIVERGENT:       %.pre = load i32, ptr addrspace(4) %arrayidx10, align 4
+  %.pre = load i32, ptr addrspace(4) %arrayidx10, align 4
   br label %while.end
 
 while.body:
 ; CHECK-NOT: DIVERGENT:                  %i.026 = phi i32 [ %inc, %if.end.while.body_crit_edge ], [ 0, %while.body.preheader ]
 ; CHECK: DIVERGENT:       %call3 = tail call i32 @gf1(i32 0) #0
 ; CHECK: DIVERGENT:       %cmp4 = icmp ult i32 %call3, 10
-; CHECK: DIVERGENT:       %arrayidx6 = getelementptr inbounds i32, i32 addrspace(4)* %A, i32 %i.026
-; CHECK: DIVERGENT:       %1 = load i32, i32 addrspace(4)* %arrayidx6, align 4
+; CHECK: DIVERGENT:       %arrayidx6 = getelementptr inbounds i32, ptr addrspace(4) %A, i32 %i.026
+; CHECK: DIVERGENT:       %1 = load i32, ptr addrspace(4) %arrayidx6, align 4
 ; CHECK: DIVERGENT:       br i1 %cmp4, label %if.then5, label %if.else
   %i.026 = phi i32 [ %inc, %if.end.while.body_crit_edge ], [ 0, %while.body.preheader ]
   %call3 = tail call  i32 @gf1(i32 0) #4
   %cmp4 = icmp ult i32 %call3, 10
-  %arrayidx6 = getelementptr inbounds i32, i32 addrspace(4)* %A, i32 %i.026
-  %1 = load i32, i32 addrspace(4)* %arrayidx6, align 4
+  %arrayidx6 = getelementptr inbounds i32, ptr addrspace(4) %A, i32 %i.026
+  %1 = load i32, ptr addrspace(4) %arrayidx6, align 4
   br i1 %cmp4, label %if.then5, label %if.else
 
 if.then5:
 ; CHECK: DIVERGENT:       %mul = shl i32 %1, 1
-; CHECK: DIVERGENT:       %2 = load i32, i32 addrspace(4)* %arrayidx10, align 4
+; CHECK: DIVERGENT:       %2 = load i32, ptr addrspace(4) %arrayidx10, align 4
 ; CHECK: DIVERGENT:       %add = add nsw i32 %2, %mul
   %mul = shl i32 %1, 1
-  %2 = load i32, i32 addrspace(4)* %arrayidx10, align 4
+  %2 = load i32, ptr addrspace(4) %arrayidx10, align 4
   %add = add nsw i32 %2, %mul
   br label %if.end
 
 if.else:
 ; CHECK: DIVERGENT:       %mul9 = shl i32 %1, 2
-; CHECK: DIVERGENT:       %3 = load i32, i32 addrspace(4)* %arrayidx10, align 4
+; CHECK: DIVERGENT:       %3 = load i32, ptr addrspace(4) %arrayidx10, align 4
 ; CHECK: DIVERGENT:       %add11 = add nsw i32 %3, %mul9
   %mul9 = shl i32 %1, 2
-  %3 = load i32, i32 addrspace(4)* %arrayidx10, align 4
+  %3 = load i32, ptr addrspace(4) %arrayidx10, align 4
   %add11 = add nsw i32 %3, %mul9
   br label %if.end
 
 if.end:
 ; CHECK: DIVERGENT:       %storemerge = phi i32 [ %add11, %if.else ], [ %add, %if.then5 ]
-; CHECK: DIVERGENT:       store i32 %storemerge, i32 addrspace(4)* %arrayidx10, align 4
+; CHECK: DIVERGENT:       store i32 %storemerge, ptr addrspace(4) %arrayidx10, align 4
 ; CHECK-NOT: DIVERGENT:                  %inc = add nuw nsw i32 %i.026, 1
 ; CHECK: DIVERGENT:       %exitcond = icmp ne i32 %inc, %0
 ; CHECK: DIVERGENT:       br i1 %exitcond, label %if.end.while.body_crit_edge, label %while.end.loopexit
   %storemerge = phi i32 [ %add11, %if.else ], [ %add, %if.then5 ]
-  store i32 %storemerge, i32 addrspace(4)* %arrayidx10, align 4
+  store i32 %storemerge, ptr addrspace(4) %arrayidx10, align 4
   %inc = add nuw nsw i32 %i.026, 1
   %exitcond = icmp ne i32 %inc, %0
   br i1 %exitcond, label %if.end.while.body_crit_edge, label %while.end.loopexit
@@ -93,11 +93,11 @@ while.end:
 ; CHECK: DIVERGENT:       %4 = phi i32 [ %.pre, %if.then.while.end_crit_edge ], [ %storemerge.lcssa, %while.end.loopexit ]
 ; CHECK: DIVERGENT:       %i.0.lcssa = phi i32 [ 0, %if.then.while.end_crit_edge ], [ %0, %while.end.loopexit ]
 ; CHECK: DIVERGENT:       %sub = sub nsw i32 %4, %i.0.lcssa
-; CHECK: DIVERGENT:       store i32 %sub, i32 addrspace(4)* %arrayidx10, align 4
+; CHECK: DIVERGENT:       store i32 %sub, ptr addrspace(4) %arrayidx10, align 4
   %4 = phi i32 [ %.pre, %if.then.while.end_crit_edge ], [ %storemerge.lcssa, %while.end.loopexit ]
   %i.0.lcssa = phi i32 [ 0, %if.then.while.end_crit_edge ], [ %0, %while.end.loopexit ]
   %sub = sub nsw i32 %4, %i.0.lcssa
-  store i32 %sub, i32 addrspace(4)* %arrayidx10, align 4
+  store i32 %sub, ptr addrspace(4) %arrayidx10, align 4
   br label %new_exit
 
 new_exit:

diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/control-flow-intrinsics.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/control-flow-intrinsics.ll
index 25f2a768ed4bd..e26f0f8790b41 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/control-flow-intrinsics.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/control-flow-intrinsics.ll
@@ -10,7 +10,7 @@ define amdgpu_ps void @test_if_break(i32 %arg0, i64 inreg %saved) {
 entry:
   %cond = icmp eq i32 %arg0, 0
   %break = call i64 @llvm.amdgcn.if.break.i64.i64(i1 %cond, i64 %saved)
-  store volatile i64 %break, i64 addrspace(1)* undef
+  store volatile i64 %break, ptr addrspace(1) undef
   ret void
 }
 
@@ -27,8 +27,8 @@ entry:
   %if.bool = extractvalue { i1, i64 } %if, 0
   %if.mask = extractvalue { i1, i64 } %if, 1
   %if.bool.ext = zext i1 %if.bool to i32
-  store volatile i32 %if.bool.ext, i32 addrspace(1)* undef
-  store volatile i64 %if.mask, i64 addrspace(1)* undef
+  store volatile i32 %if.bool.ext, ptr addrspace(1) undef
+  store volatile i64 %if.mask, ptr addrspace(1) undef
   ret void
 }
 
@@ -46,8 +46,8 @@ entry:
   %if.bool = extractvalue { i1, i64 } %if, 0
   %if.mask = extractvalue { i1, i64 } %if, 1
   %if.bool.ext = zext i1 %if.bool to i32
-  store volatile i32 %if.bool.ext, i32 addrspace(1)* undef
-  store volatile i64 %if.mask, i64 addrspace(1)* undef
+  store volatile i32 %if.bool.ext, ptr addrspace(1) undef
+  store volatile i64 %if.mask, ptr addrspace(1) undef
   ret void
 }
 
@@ -57,7 +57,7 @@ define amdgpu_ps void @test_loop_uniform(i64 inreg %mask) {
 entry:
   %loop = call i1 @llvm.amdgcn.loop.i64(i64 %mask)
   %loop.ext = zext i1 %loop to i32
-  store volatile i32 %loop.ext, i32 addrspace(1)* undef
+  store volatile i32 %loop.ext, ptr addrspace(1) undef
   ret void
 }
 
@@ -71,8 +71,8 @@ entry:
   %else.bool = extractvalue { i1, i64 } %else, 0
   %else.mask = extractvalue { i1, i64 } %else, 1
   %else.bool.ext = zext i1 %else.bool to i32
-  store volatile i32 %else.bool.ext, i32 addrspace(1)* undef
-  store volatile i64 %else.mask, i64 addrspace(1)* undef
+  store volatile i32 %else.bool.ext, ptr addrspace(1) undef
+  store volatile i64 %else.mask, ptr addrspace(1) undef
   ret void
 }
 
@@ -88,8 +88,8 @@ entry:
   %if.bool = extractvalue { i1, i64 } %if, 0
   %if.mask = extractvalue { i1, i64 } %if, 1
   %if.bool.ext = zext i1 %if.bool to i32
-  store volatile i32 %if.bool.ext, i32 addrspace(1)* undef
-  store volatile i64 %if.mask, i64 addrspace(1)* undef
+  store volatile i32 %if.bool.ext, ptr addrspace(1) undef
+  store volatile i64 %if.mask, ptr addrspace(1) undef
   ret void
 }
 

diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/inline-asm.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/inline-asm.ll
index 65574e57ddb94..5f8176acb9495 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/inline-asm.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/inline-asm.ll
@@ -50,8 +50,8 @@ define void @inline_asm_2_sgpr_virtreg_output() {
   %asm = call { i32, i32 } asm "; def $0, $1", "=s,=s"()
   %sgpr0 = extractvalue { i32, i32 } %asm, 0
   %sgpr1 = extractvalue { i32, i32 } %asm, 1
-  store i32 %sgpr0, i32 addrspace(1)* undef
-  store i32 %sgpr1, i32 addrspace(1)* undef
+  store i32 %sgpr0, ptr addrspace(1) undef
+  store i32 %sgpr1, ptr addrspace(1) undef
   ret void
 }
 
@@ -64,8 +64,8 @@ define void @inline_asm_sgpr_vgpr_virtreg_output() {
   %asm = call { i32, i32 } asm "; def $0, $1", "=s,=v"()
   %sgpr = extractvalue { i32, i32 } %asm, 0
   %vgpr = extractvalue { i32, i32 } %asm, 1
-  store i32 %sgpr, i32 addrspace(1)* undef
-  store i32 %vgpr, i32 addrspace(1)* undef
+  store i32 %sgpr, ptr addrspace(1) undef
+  store i32 %vgpr, ptr addrspace(1) undef
   ret void
 }
 
@@ -77,8 +77,8 @@ define void @inline_asm_vgpr_sgpr_virtreg_output() {
   %asm = call { i32, i32 } asm "; def $0, $1", "=v,=s"()
   %vgpr = extractvalue { i32, i32 } %asm, 0
   %sgpr = extractvalue { i32, i32 } %asm, 1
-  store i32 %vgpr, i32 addrspace(1)* undef
-  store i32 %sgpr, i32 addrspace(1)* undef
+  store i32 %vgpr, ptr addrspace(1) undef
+  store i32 %sgpr, ptr addrspace(1) undef
   ret void
 }
 
@@ -89,8 +89,8 @@ define void @multi_sgpr_inline_asm_output_input_constraint() {
   %asm = call { i32, i32 } asm "; def $0, $1", "=s,=s,s"(i32 1234)
   %sgpr0 = extractvalue { i32, i32 } %asm, 0
   %sgpr1 = extractvalue { i32, i32 } %asm, 1
-  store i32 %sgpr0, i32 addrspace(1)* undef
-  store i32 %sgpr1, i32 addrspace(1)* undef
+  store i32 %sgpr0, ptr addrspace(1) undef
+  store i32 %sgpr1, ptr addrspace(1) undef
   ret void
 }
 
@@ -102,7 +102,7 @@ define void @inline_asm_vgpr_sgpr_virtreg_output_input_constraint() {
   %asm = call { i32, i32 } asm "; def $0, $1", "=v,=s,v"(i32 1234)
   %vgpr = extractvalue { i32, i32 } %asm, 0
   %sgpr = extractvalue { i32, i32 } %asm, 1
-  store i32 %vgpr, i32 addrspace(1)* undef
-  store i32 %sgpr, i32 addrspace(1)* undef
+  store i32 %vgpr, ptr addrspace(1) undef
+  store i32 %sgpr, ptr addrspace(1) undef
   ret void
 }

diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll
index 51126aa7d8e97..564742137256d 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/intrinsics.ll
@@ -1,51 +1,51 @@
 ; RUN: opt -mtriple amdgcn-- -passes='print<divergence>' -disable-output %s 2>&1 | FileCheck %s
 
 ; CHECK: DIVERGENT: %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0
-define amdgpu_kernel void @ds_swizzle(i32 addrspace(1)* %out, i32 %src) #0 {
+define amdgpu_kernel void @ds_swizzle(ptr addrspace(1) %out, i32 %src) #0 {
   %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0
-  store i32 %swizzle, i32 addrspace(1)* %out, align 4
+  store i32 %swizzle, ptr addrspace(1) %out, align 4
   ret void
 }
 
 ; CHECK: DIVERGENT: %v = call i32 @llvm.amdgcn.permlane16(i32 %src0, i32 %src0, i32 %src1, i32 %src2, i1 false, i1 false) #0
-define amdgpu_kernel void @v_permlane16_b32(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) #0 {
+define amdgpu_kernel void @v_permlane16_b32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
   %v = call i32 @llvm.amdgcn.permlane16(i32 %src0, i32 %src0, i32 %src1, i32 %src2, i1 false, i1 false) #0
-  store i32 %v, i32 addrspace(1)* %out
+  store i32 %v, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK: DIVERGENT: %v = call i32 @llvm.amdgcn.permlanex16(i32 %src0, i32 %src0, i32 %src1, i32 %src2, i1 false, i1 false) #0
-define amdgpu_kernel void @v_permlanex16_b32(i32 addrspace(1)* %out, i32 %src0, i32 %src1, i32 %src2) #0 {
+define amdgpu_kernel void @v_permlanex16_b32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
   %v = call i32 @llvm.amdgcn.permlanex16(i32 %src0, i32 %src0, i32 %src1, i32 %src2, i1 false, i1 false) #0
-  store i32 %v, i32 addrspace(1)* %out
+  store i32 %v, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.update.dpp.i32(i32 %in1, i32 %in2, i32 1, i32 1, i32 1, i1 false) #0
-define amdgpu_kernel void @update_dpp(i32 addrspace(1)* %out, i32 %in1, i32 %in2) #0 {
+define amdgpu_kernel void @update_dpp(ptr addrspace(1) %out, i32 %in1, i32 %in2) #0 {
   %tmp0 = call i32 @llvm.amdgcn.update.dpp.i32(i32 %in1, i32 %in2, i32 1, i32 1, i32 1, i1 false) #0
-  store i32 %tmp0, i32 addrspace(1)* %out
+  store i32 %tmp0, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 true) #0
-define amdgpu_kernel void @mov_dpp(i32 addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @mov_dpp(ptr addrspace(1) %out, i32 %in) #0 {
   %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %in, i32 1, i32 1, i32 1, i1 true) #0
-  store i32 %tmp0, i32 addrspace(1)* %out
+  store i32 %tmp0, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.mov.dpp8.i32(i32 %in, i32 1) #0
-define amdgpu_kernel void @mov_dpp8(i32 addrspace(1)* %out, i32 %in) #0 {
+define amdgpu_kernel void @mov_dpp8(ptr addrspace(1) %out, i32 %in) #0 {
   %tmp0 = call i32 @llvm.amdgcn.mov.dpp8.i32(i32 %in, i32 1) #0
-  store i32 %tmp0, i32 addrspace(1)* %out
+  store i32 %tmp0, ptr addrspace(1) %out
   ret void
 }
 
 ; CHECK: DIVERGENT: %tmp0 = call i32 @llvm.amdgcn.writelane(i32 0, i32 1, i32 2)
-define amdgpu_kernel void @writelane(i32 addrspace(1)* %out) #0 {
+define amdgpu_kernel void @writelane(ptr addrspace(1) %out) #0 {
   %tmp0 = call i32 @llvm.amdgcn.writelane(i32 0, i32 1, i32 2)
-  store i32 %tmp0, i32 addrspace(1)* %out
+  store i32 %tmp0, ptr addrspace(1) %out
   ret void
 }
 

diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll
index cb90bdf8f4e73..1d02d5c9bb9ff 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll
@@ -1,14 +1,14 @@
 ; RUN: opt -mtriple amdgcn-- -passes='print<divergence>' -disable-output %s 2>&1 | FileCheck %s
 
 ; CHECK-LABEL: Divergence Analysis' for function 'test_amdgpu_ps':
-; CHECK: DIVERGENT:  [4 x <16 x i8>] addrspace(4)* %arg0
+; CHECK: DIVERGENT:  ptr addrspace(4) %arg0
 ; CHECK-NOT: DIVERGENT
 ; CHECK: DIVERGENT:  <2 x i32> %arg3
 ; CHECK: DIVERGENT:  <3 x i32> %arg4
 ; CHECK: DIVERGENT:  float %arg5
 ; CHECK: DIVERGENT:  i32 %arg6
 
-define amdgpu_ps void @test_amdgpu_ps([4 x <16 x i8>] addrspace(4)* byref([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
+define amdgpu_ps void @test_amdgpu_ps(ptr addrspace(4) byref([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
   ret void
 }
 
@@ -20,7 +20,7 @@ define amdgpu_ps void @test_amdgpu_ps([4 x <16 x i8>] addrspace(4)* byref([4 x <
 ; CHECK-NOT: %arg4
 ; CHECK-NOT: %arg5
 ; CHECK-NOT: %arg6
-define amdgpu_kernel void @test_amdgpu_kernel([4 x <16 x i8>] addrspace(4)* byref([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
+define amdgpu_kernel void @test_amdgpu_kernel(ptr addrspace(4) byref([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
   ret void
 }
 
@@ -32,7 +32,7 @@ define amdgpu_kernel void @test_amdgpu_kernel([4 x <16 x i8>] addrspace(4)* byre
 ; CHECK: DIVERGENT:
 ; CHECK: DIVERGENT:
 ; CHECK: DIVERGENT:
-define void @test_c([4 x <16 x i8>] addrspace(5)* byval([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
+define void @test_c(ptr addrspace(5) byval([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
   ret void
 }
 

diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll
index 7153b2376f09b..176b8080ef4d7 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/no-return-blocks.ll
@@ -1,25 +1,25 @@
 ; RUN: opt -mtriple amdgcn-- -passes='print<divergence>' -disable-output %s 2>&1 | FileCheck %s
 
-; CHECK: DIVERGENT:  %tmp5 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp2
-; CHECK: DIVERGENT:  %tmp10 = load volatile float, float addrspace(1)* %tmp5, align 4
-; CHECK: DIVERGENT:  %tmp11 = load volatile float, float addrspace(1)* %tmp5, align 4
+; CHECK: DIVERGENT:  %tmp5 = getelementptr inbounds float, ptr addrspace(1) %arg, i64 %tmp2
+; CHECK: DIVERGENT:  %tmp10 = load volatile float, ptr addrspace(1) %tmp5, align 4
+; CHECK: DIVERGENT:  %tmp11 = load volatile float, ptr addrspace(1) %tmp5, align 4
 
 ; The post dominator tree does not have a root node in this case
-define amdgpu_kernel void @no_return_blocks(float addrspace(1)* noalias nocapture readonly %arg, float addrspace(1)* noalias nocapture readonly %arg1) #0 {
+define amdgpu_kernel void @no_return_blocks(ptr addrspace(1) noalias nocapture readonly %arg, ptr addrspace(1) noalias nocapture readonly %arg1) #0 {
 bb0:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
   %tmp2 = sext i32 %tmp to i64
-  %tmp5 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp2
-  %tmp6 = load volatile float, float addrspace(1)* %tmp5, align 4
+  %tmp5 = getelementptr inbounds float, ptr addrspace(1) %arg, i64 %tmp2
+  %tmp6 = load volatile float, ptr addrspace(1) %tmp5, align 4
   %tmp8 = fcmp olt float %tmp6, 0.000000e+00
   br i1 %tmp8, label %bb1, label %bb2
 
 bb1:
-  %tmp10 = load volatile float, float addrspace(1)* %tmp5, align 4
+  %tmp10 = load volatile float, ptr addrspace(1) %tmp5, align 4
   br label %bb2
 
 bb2:
-  %tmp11 = load volatile float, float addrspace(1)* %tmp5, align 4
+  %tmp11 = load volatile float, ptr addrspace(1) %tmp5, align 4
   br label %bb1
 }
 

diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
index ab1a51cb60cdf..0c4104a247d67 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
@@ -6,7 +6,7 @@ entry:
   unreachable
 
 unreachable_loop:                                        ; preds = %do.body.i, %if.then11
-  %tmp = cmpxchg volatile i32 addrspace(1)* null, i32 0, i32 0 seq_cst seq_cst
+  %tmp = cmpxchg volatile ptr addrspace(1) null, i32 0, i32 0 seq_cst seq_cst
   %cmp.i = extractvalue { i32, i1 } %tmp, 1
   br i1 %cmp.i, label %unreachable_loop, label %end
 

diff --git a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
index 492e93aa416b3..0d47665d571cc 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
@@ -9,35 +9,35 @@ declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
 ; CHECK: DIVERGENT:  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
 define amdgpu_kernel void @workitem_id_x() #1 {
   %id.x = call i32 @llvm.amdgcn.workitem.id.x()
-  store volatile i32 %id.x, i32 addrspace(1)* undef
+  store volatile i32 %id.x, ptr addrspace(1) undef
   ret void
 }
 
 ; CHECK: DIVERGENT:  %id.y = call i32 @llvm.amdgcn.workitem.id.y()
 define amdgpu_kernel void @workitem_id_y() #1 {
   %id.y = call i32 @llvm.amdgcn.workitem.id.y()
-  store volatile i32 %id.y, i32 addrspace(1)* undef
+  store volatile i32 %id.y, ptr addrspace(1) undef
   ret void
 }
 
 ; CHECK: DIVERGENT:  %id.z = call i32 @llvm.amdgcn.workitem.id.z()
 define amdgpu_kernel void @workitem_id_z() #1 {
   %id.z = call i32 @llvm.amdgcn.workitem.id.z()
-  store volatile i32 %id.z, i32 addrspace(1)* undef
+  store volatile i32 %id.z, ptr addrspace(1) undef
   ret void
 }
 
 ; CHECK: DIVERGENT:  %mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 0, i32 0)
 define amdgpu_kernel void @mbcnt_lo() #1 {
   %mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 0, i32 0)
-  store volatile i32 %mbcnt.lo, i32 addrspace(1)* undef
+  store volatile i32 %mbcnt.lo, ptr addrspace(1) undef
   ret void
 }
 
 ; CHECK: DIVERGENT:  %mbcnt.hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 0)
 define amdgpu_kernel void @mbcnt_hi() #1 {
   %mbcnt.hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 0)
-  store volatile i32 %mbcnt.hi, i32 addrspace(1)* undef
+  store volatile i32 %mbcnt.hi, ptr addrspace(1) undef
   ret void
 }
 

diff --git a/llvm/test/Analysis/DivergenceAnalysis/NVPTX/daorder.ll b/llvm/test/Analysis/DivergenceAnalysis/NVPTX/daorder.ll
index 6bdecb84b6de9..d830ae0285dc6 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/NVPTX/daorder.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/NVPTX/daorder.ll
@@ -44,4 +44,4 @@ declare i32 @llvm.nvvm.read.ptx.sreg.tid.z()
 declare i32 @llvm.nvvm.read.ptx.sreg.laneid()
 
 !nvvm.annotations = !{!0}
-!0 = !{i32 (i32)* @daorder, !"kernel", i32 1}
+!0 = !{ptr @daorder, !"kernel", i32 1}

diff --git a/llvm/test/Analysis/DivergenceAnalysis/NVPTX/diverge.ll b/llvm/test/Analysis/DivergenceAnalysis/NVPTX/diverge.ll
index 5ddfaf2a1ca05..2a5242b6323a2 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/NVPTX/diverge.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/NVPTX/diverge.ll
@@ -168,8 +168,8 @@ declare i32 @llvm.nvvm.read.ptx.sreg.tid.z()
 declare i32 @llvm.nvvm.read.ptx.sreg.laneid()
 
 !nvvm.annotations = !{!0, !1, !2, !3, !4}
-!0 = !{i32 (i32, i32, i32)* @no_diverge, !"kernel", i32 1}
-!1 = !{i32 (i32, i32)* @sync, !"kernel", i32 1}
-!2 = !{i32 (i32, i32, i32)* @mixed, !"kernel", i32 1}
-!3 = !{i32 ()* @loop, !"kernel", i32 1}
-!4 = !{i32 (i32)* @sync_no_loop, !"kernel", i32 1}
+!0 = !{ptr @no_diverge, !"kernel", i32 1}
+!1 = !{ptr @sync, !"kernel", i32 1}
+!2 = !{ptr @mixed, !"kernel", i32 1}
+!3 = !{ptr @loop, !"kernel", i32 1}
+!4 = !{ptr @sync_no_loop, !"kernel", i32 1}

diff --git a/llvm/test/Analysis/DivergenceAnalysis/NVPTX/hidden_diverge.ll b/llvm/test/Analysis/DivergenceAnalysis/NVPTX/hidden_diverge.ll
index 5b72aa8d2899f..bb12f8b212809 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/NVPTX/hidden_diverge.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/NVPTX/hidden_diverge.ll
@@ -27,4 +27,4 @@ merge:
 declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
 
 !nvvm.annotations = !{!0}
-!0 = !{i32 (i32, i32, i32)* @hidden_diverge, !"kernel", i32 1}
+!0 = !{ptr @hidden_diverge, !"kernel", i32 1}

diff --git a/llvm/test/Analysis/DivergenceAnalysis/NVPTX/irreducible.ll b/llvm/test/Analysis/DivergenceAnalysis/NVPTX/irreducible.ll
index 4237abe998e99..b81045dfb4a03 100644
--- a/llvm/test/Analysis/DivergenceAnalysis/NVPTX/irreducible.ll
+++ b/llvm/test/Analysis/DivergenceAnalysis/NVPTX/irreducible.ll
@@ -59,4 +59,4 @@ declare i32 @llvm.nvvm.read.ptx.sreg.tid.z()
 declare i32 @llvm.nvvm.read.ptx.sreg.laneid()
 
 !nvvm.annotations = !{!0}
-!0 = !{i32 (i1)* @unstructured_loop, !"kernel", i32 1}
+!0 = !{ptr @unstructured_loop, !"kernel", i32 1}

diff --git a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/atomics.ll b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/atomics.ll
index e6f2385ba130a..6ed0497dce7cc 100644
--- a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/atomics.ll
+++ b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/atomics.ll
@@ -1,57 +1,57 @@
 ; RUN: opt -mtriple=amdgcn-- -amdgpu-use-legacy-divergence-analysis -passes='print<divergence>' 2>&1 -disable-output %s | FileCheck %s
 
-; CHECK: DIVERGENT: %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
-define amdgpu_kernel void @test1(i32* %ptr, i32 %val) #0 {
-  %orig = atomicrmw xchg i32* %ptr, i32 %val seq_cst
-  store i32 %orig, i32* %ptr
+; CHECK: DIVERGENT: %orig = atomicrmw xchg ptr %ptr, i32 %val seq_cst
+define amdgpu_kernel void @test1(ptr %ptr, i32 %val) #0 {
+  %orig = atomicrmw xchg ptr %ptr, i32 %val seq_cst
+  store i32 %orig, ptr %ptr
   ret void
 }
 
-; CHECK: DIVERGENT: %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
-define amdgpu_kernel void @test2(i32* %ptr, i32 %cmp, i32 %new) {
-  %orig = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+; CHECK: DIVERGENT: %orig = cmpxchg ptr %ptr, i32 %cmp, i32 %new seq_cst seq_cst
+define amdgpu_kernel void @test2(ptr %ptr, i32 %cmp, i32 %new) {
+  %orig = cmpxchg ptr %ptr, i32 %cmp, i32 %new seq_cst seq_cst
   %val = extractvalue { i32, i1 } %orig, 0
-  store i32 %val, i32* %ptr
+  store i32 %val, ptr %ptr
   ret void
 }
 
-; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
-define i32 @test_atomic_inc_i32(i32 addrspace(1)* %ptr, i32 %val) #0 {
-  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
+; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @test_atomic_inc_i32(ptr addrspace(1) %ptr, i32 %val) #0 {
+  %ret = call i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) %ptr, i32 %val, i32 0, i32 0, i1 false)
   ret i32 %ret
 }
 
-; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
-define i64 @test_atomic_inc_i64(i64 addrspace(1)* %ptr, i64 %val) #0 {
-  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
+; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) %ptr, i64 %val, i32 0, i32 0, i1 false)
+define i64 @test_atomic_inc_i64(ptr addrspace(1) %ptr, i64 %val) #0 {
+  %ret = call i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) %ptr, i64 %val, i32 0, i32 0, i1 false)
   ret i64 %ret
 }
 
-; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
-define i32 @test_atomic_dec_i32(i32 addrspace(1)* %ptr, i32 %val) #0 {
-  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %ptr, i32 %val, i32 0, i32 0, i1 false)
+; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %ptr, i32 %val, i32 0, i32 0, i1 false)
+define i32 @test_atomic_dec_i32(ptr addrspace(1) %ptr, i32 %val) #0 {
+  %ret = call i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) %ptr, i32 %val, i32 0, i32 0, i1 false)
   ret i32 %ret
 }
 
-; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
-define i64 @test_atomic_dec_i64(i64 addrspace(1)* %ptr, i64 %val) #0 {
-  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* %ptr, i64 %val, i32 0, i32 0, i1 false)
+; CHECK: DIVERGENT: %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) %ptr, i64 %val, i32 0, i32 0, i1 false)
+define i64 @test_atomic_dec_i64(ptr addrspace(1) %ptr, i64 %val) #0 {
+  %ret = call i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) %ptr, i64 %val, i32 0, i32 0, i1 false)
   ret i64 %ret
 }
 
-declare i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #1
-declare i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #1
-declare i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #1
-declare i64 @llvm.amdgcn.atomic.dec.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #1
+declare i32 @llvm.amdgcn.atomic.inc.i32.p1(ptr addrspace(1) nocapture, i32, i32, i32, i1) #1
+declare i64 @llvm.amdgcn.atomic.inc.i64.p1(ptr addrspace(1) nocapture, i64, i32, i32, i1) #1
+declare i32 @llvm.amdgcn.atomic.dec.i32.p1(ptr addrspace(1) nocapture, i32, i32, i32, i1) #1
+declare i64 @llvm.amdgcn.atomic.dec.i64.p1(ptr addrspace(1) nocapture, i64, i32, i32, i1) #1
 
-; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1i32(i32 addrspace(1)* %ptr, i32 %val)
-define amdgpu_kernel void @test_atomic_csub_i32(i32 addrspace(1)* %ptr, i32 %val) #0 {
-  %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1i32(i32 addrspace(1)* %ptr, i32 %val)
-  store i32 %ret, i32 addrspace(1)* %ptr, align 4
+; CHECK: DIVERGENT: %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %val)
+define amdgpu_kernel void @test_atomic_csub_i32(ptr addrspace(1) %ptr, i32 %val) #0 {
+  %ret = call i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) %ptr, i32 %val)
+  store i32 %ret, ptr addrspace(1) %ptr, align 4
   ret void
 }
 
-declare i32 @llvm.amdgcn.global.atomic.csub.p1i32(i32 addrspace(1)* nocapture, i32) #1
+declare i32 @llvm.amdgcn.global.atomic.csub.p1(ptr addrspace(1) nocapture, i32) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { argmemonly nounwind willreturn }

diff --git a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/intrinsics.ll b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/intrinsics.ll
index 5b381a550780b..cf803136b29fe 100644
--- a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/intrinsics.ll
+++ b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/intrinsics.ll
@@ -1,9 +1,9 @@
 ; RUN: opt -mtriple=amdgcn-- -passes='print<divergence>' 2>&1 -disable-output -amdgpu-use-legacy-divergence-analysis %s | FileCheck %s
 
 ; CHECK: DIVERGENT: %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0
-define amdgpu_kernel void @ds_swizzle(i32 addrspace(1)* %out, i32 %src) #0 {
+define amdgpu_kernel void @ds_swizzle(ptr addrspace(1) %out, i32 %src) #0 {
   %swizzle = call i32 @llvm.amdgcn.ds.swizzle(i32 %src, i32 100) #0
-  store i32 %swizzle, i32 addrspace(1)* %out, align 4
+  store i32 %swizzle, ptr addrspace(1) %out, align 4
   ret void
 }
 

diff --git a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/kernel-args.ll b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/kernel-args.ll
index d61bd1ad3a1fb..a70210be20652 100644
--- a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/kernel-args.ll
+++ b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/kernel-args.ll
@@ -1,14 +1,14 @@
 ; RUN: opt %s -mtriple amdgcn-- -amdgpu-use-legacy-divergence-analysis -passes='print<divergence>' 2>&1 -disable-output | FileCheck %s
 
 ; CHECK-LABEL: function 'test_amdgpu_ps':
-; CHECK: DIVERGENT:  [4 x <16 x i8>] addrspace(4)* %arg0
+; CHECK: DIVERGENT:  ptr addrspace(4) %arg0
 ; CHECK-NOT: DIVERGENT
 ; CHECK: DIVERGENT:  <2 x i32> %arg3
 ; CHECK: DIVERGENT:  <3 x i32> %arg4
 ; CHECK: DIVERGENT:  float %arg5
 ; CHECK: DIVERGENT:  i32 %arg6
 
-define amdgpu_ps void @test_amdgpu_ps([4 x <16 x i8>] addrspace(4)* byref([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
+define amdgpu_ps void @test_amdgpu_ps(ptr addrspace(4) byref([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
   ret void
 }
 
@@ -20,7 +20,7 @@ define amdgpu_ps void @test_amdgpu_ps([4 x <16 x i8>] addrspace(4)* byref([4 x <
 ; CHECK-NOT: %arg4
 ; CHECK-NOT: %arg5
 ; CHECK-NOT: %arg6
-define amdgpu_kernel void @test_amdgpu_kernel([4 x <16 x i8>] addrspace(4)* byref([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
+define amdgpu_kernel void @test_amdgpu_kernel(ptr addrspace(4) byref([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
   ret void
 }
 
@@ -32,7 +32,7 @@ define amdgpu_kernel void @test_amdgpu_kernel([4 x <16 x i8>] addrspace(4)* byre
 ; CHECK: DIVERGENT:
 ; CHECK: DIVERGENT:
 ; CHECK: DIVERGENT:
-define void @test_c([4 x <16 x i8>] addrspace(4)* byval([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
+define void @test_c(ptr addrspace(4) byval([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 {
   ret void
 }
 

diff --git a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/loads.ll b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/loads.ll
index b4fe76be8ff66..5394b677c621b 100644
--- a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/loads.ll
+++ b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/loads.ll
@@ -2,14 +2,14 @@
 
 ; Test that we consider loads from flat and private addrspaces to be divergent.
 
-; CHECK: DIVERGENT: %val = load i32, i32* %flat, align 4
-define amdgpu_kernel void @flat_load(i32* %flat) {
-  %val = load i32, i32* %flat, align 4
+; CHECK: DIVERGENT: %val = load i32, ptr %flat, align 4
+define amdgpu_kernel void @flat_load(ptr %flat) {
+  %val = load i32, ptr %flat, align 4
   ret void
 }
 
-; CHECK: DIVERGENT: %val = load i32, i32 addrspace(5)* %priv, align 4
-define amdgpu_kernel void @private_load(i32 addrspace(5)* %priv) {
-  %val = load i32, i32 addrspace(5)* %priv, align 4
+; CHECK: DIVERGENT: %val = load i32, ptr addrspace(5) %priv, align 4
+define amdgpu_kernel void @private_load(ptr addrspace(5) %priv) {
+  %val = load i32, ptr addrspace(5) %priv, align 4
   ret void
 }

diff --git a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/no-return-blocks.ll b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/no-return-blocks.ll
index b8670d27d4d5f..8c30a418629b9 100644
--- a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/no-return-blocks.ll
+++ b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/no-return-blocks.ll
@@ -1,25 +1,25 @@
 ; RUN: opt %s -mtriple amdgcn-- -amdgpu-use-legacy-divergence-analysis -passes='print<divergence>' 2>&1 -disable-output | FileCheck %s
 
-; CHECK: DIVERGENT:  %tmp5 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp2
-; CHECK: DIVERGENT:  %tmp10 = load volatile float, float addrspace(1)* %tmp5, align 4
-; CHECK: DIVERGENT:  %tmp11 = load volatile float, float addrspace(1)* %tmp5, align 4
+; CHECK: DIVERGENT:  %tmp5 = getelementptr inbounds float, ptr addrspace(1) %arg, i64 %tmp2
+; CHECK: DIVERGENT:  %tmp10 = load volatile float, ptr addrspace(1) %tmp5, align 4
+; CHECK: DIVERGENT:  %tmp11 = load volatile float, ptr addrspace(1) %tmp5, align 4
 
 ; The post dominator tree does not have a root node in this case
-define amdgpu_kernel void @no_return_blocks(float addrspace(1)* noalias nocapture readonly %arg, float addrspace(1)* noalias nocapture readonly %arg1) #0 {
+define amdgpu_kernel void @no_return_blocks(ptr addrspace(1) noalias nocapture readonly %arg, ptr addrspace(1) noalias nocapture readonly %arg1) #0 {
 bb0:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
   %tmp2 = sext i32 %tmp to i64
-  %tmp5 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp2
-  %tmp6 = load volatile float, float addrspace(1)* %tmp5, align 4
+  %tmp5 = getelementptr inbounds float, ptr addrspace(1) %arg, i64 %tmp2
+  %tmp6 = load volatile float, ptr addrspace(1) %tmp5, align 4
   %tmp8 = fcmp olt float %tmp6, 0.000000e+00
   br i1 %tmp8, label %bb1, label %bb2
 
 bb1:
-  %tmp10 = load volatile float, float addrspace(1)* %tmp5, align 4
+  %tmp10 = load volatile float, ptr addrspace(1) %tmp5, align 4
   br label %bb2
 
 bb2:
-  %tmp11 = load volatile float, float addrspace(1)* %tmp5, align 4
+  %tmp11 = load volatile float, ptr addrspace(1) %tmp5, align 4
   br label %bb1
 }
 

diff --git a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/unreachable-loop-block.ll b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
index 41193dc201d11..14f6318d646bf 100644
--- a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
+++ b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/unreachable-loop-block.ll
@@ -6,7 +6,7 @@ entry:
   unreachable
 
 unreachable_loop:                                        ; preds = %do.body.i, %if.then11
-  %tmp = cmpxchg volatile i32 addrspace(1)* null, i32 0, i32 0 seq_cst seq_cst
+  %tmp = cmpxchg volatile ptr addrspace(1) null, i32 0, i32 0 seq_cst seq_cst
   %cmp.i = extractvalue { i32, i1 } %tmp, 1
   br i1 %cmp.i, label %unreachable_loop, label %end
 

diff --git a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/workitem-intrinsics.ll b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
index 53e78deaeddb1..b33a6724edb2d 100644
--- a/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
+++ b/llvm/test/Analysis/LegacyDivergenceAnalysis/AMDGPU/workitem-intrinsics.ll
@@ -9,35 +9,35 @@ declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #0
 ; CHECK: DIVERGENT:  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
 define amdgpu_kernel void @workitem_id_x() #1 {
   %id.x = call i32 @llvm.amdgcn.workitem.id.x()
-  store volatile i32 %id.x, i32 addrspace(1)* undef
+  store volatile i32 %id.x, ptr addrspace(1) undef
   ret void
 }
 
 ; CHECK: DIVERGENT:  %id.y = call i32 @llvm.amdgcn.workitem.id.y()
 define amdgpu_kernel void @workitem_id_y() #1 {
   %id.y = call i32 @llvm.amdgcn.workitem.id.y()
-  store volatile i32 %id.y, i32 addrspace(1)* undef
+  store volatile i32 %id.y, ptr addrspace(1) undef
   ret void
 }
 
 ; CHECK: DIVERGENT:  %id.z = call i32 @llvm.amdgcn.workitem.id.z()
 define amdgpu_kernel void @workitem_id_z() #1 {
   %id.z = call i32 @llvm.amdgcn.workitem.id.z()
-  store volatile i32 %id.z, i32 addrspace(1)* undef
+  store volatile i32 %id.z, ptr addrspace(1) undef
   ret void
 }
 
 ; CHECK: DIVERGENT:  %mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 0, i32 0)
 define amdgpu_kernel void @mbcnt_lo() #1 {
   %mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 0, i32 0)
-  store volatile i32 %mbcnt.lo, i32 addrspace(1)* undef
+  store volatile i32 %mbcnt.lo, ptr addrspace(1) undef
   ret void
 }
 
 ; CHECK: DIVERGENT:  %mbcnt.hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 0)
 define amdgpu_kernel void @mbcnt_hi() #1 {
   %mbcnt.hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 0, i32 0)
-  store volatile i32 %mbcnt.hi, i32 addrspace(1)* undef
+  store volatile i32 %mbcnt.hi, ptr addrspace(1) undef
   ret void
 }
 

diff --git a/llvm/test/Analysis/LegacyDivergenceAnalysis/NVPTX/diverge.ll b/llvm/test/Analysis/LegacyDivergenceAnalysis/NVPTX/diverge.ll
index f1c1476e227bb..cd965a5cb27fd 100644
--- a/llvm/test/Analysis/LegacyDivergenceAnalysis/NVPTX/diverge.ll
+++ b/llvm/test/Analysis/LegacyDivergenceAnalysis/NVPTX/diverge.ll
@@ -211,9 +211,9 @@ declare i32 @llvm.nvvm.read.ptx.sreg.tid.z()
 declare i32 @llvm.nvvm.read.ptx.sreg.laneid()
 
 !nvvm.annotations = !{!0, !1, !2, !3, !4, !5}
-!0 = !{i32 (i32, i32, i32)* @no_diverge, !"kernel", i32 1}
-!1 = !{i32 (i32, i32)* @sync, !"kernel", i32 1}
-!2 = !{i32 (i32, i32, i32)* @mixed, !"kernel", i32 1}
-!3 = !{i32 ()* @loop, !"kernel", i32 1}
-!4 = !{i32 (i1)* @unstructured_loop, !"kernel", i32 1}
-!5 = !{i32 (i32)* @sync_no_loop, !"kernel", i32 1}
+!0 = !{ptr @no_diverge, !"kernel", i32 1}
+!1 = !{ptr @sync, !"kernel", i32 1}
+!2 = !{ptr @mixed, !"kernel", i32 1}
+!3 = !{ptr @loop, !"kernel", i32 1}
+!4 = !{ptr @unstructured_loop, !"kernel", i32 1}
+!5 = !{ptr @sync_no_loop, !"kernel", i32 1}
