[llvm] aa65dba - LoopStrengthReduce: Convert AMDGPU tests to opaque pointers

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 27 18:17:53 PST 2023


Author: Matt Arsenault
Date: 2023-01-27T22:17:20-04:00
New Revision: aa65dba05cfd0bb2fa06f1e6c2b35d6e46b38ea2

URL: https://github.com/llvm/llvm-project/commit/aa65dba05cfd0bb2fa06f1e6c2b35d6e46b38ea2
DIFF: https://github.com/llvm/llvm-project/commit/aa65dba05cfd0bb2fa06f1e6c2b35d6e46b38ea2.diff

LOG: LoopStrengthReduce: Convert AMDGPU tests to opaque pointers
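
With opaque pointers the pointee type disappears from the IR: every
"T addrspace(N)*" becomes "ptr addrspace(N)", and the pointee also drops
out of intrinsic name mangling. A minimal before/after sketch, lifted
from the atomics test below:

  ; typed-pointer form
  declare i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* nocapture, i32, i32, i32, i1)
  ; opaque-pointer form: no pointee in the type or the mangled name
  declare i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) nocapture, i32, i32, i32, i1)

The CHECK lines also change shape: without pointee types LSR emits
i8-based %uglygep GEPs indexed in bytes instead of the old element-typed
%scevgep GEPs, so the expected GEP indices are rescaled below.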

Added: 
    

Modified: 
    llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
    llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
    llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-invalid-ptr-extend.ll
    llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
index 127a172d3c39a..b67235c0cc803 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
@@ -1,4 +1,4 @@
-; RUN: opt -opaque-pointers=0 -S -mtriple=amdgcn-- -mcpu=bonaire -loop-reduce < %s | FileCheck -check-prefix=OPT %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=bonaire -loop-reduce < %s | FileCheck -check-prefix=OPT %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
 
@@ -7,18 +7,18 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 ; OPT-LABEL: @test_local_atomicrmw_addressing_loop_uniform_index_max_offset_i32(
 
 ; OPT: .lr.ph.preheader:
-; OPT: %scevgep2 = getelementptr i32, i32 addrspace(3)*  %arg1, i32 16383
+; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
 ; OPT: br label %.lr.ph
 ; OPT: .lr.ph:
-; OPT: %lsr.iv3 = phi i32 addrspace(3)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
-; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %uglygep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
 ; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
-; OPT: %tmp4 = atomicrmw add i32 addrspace(3)* %lsr.iv3, i32 undef seq_cst, align 4
-; OPT: %tmp7 = atomicrmw add i32 addrspace(3)* %lsr.iv1, i32 undef seq_cst, align 4
-; OPT: %0 = atomicrmw add i32 addrspace(3)* %lsr.iv1, i32 %tmp8 seq_cst, align 4
-; OPT: %scevgep4 = getelementptr i32, i32 addrspace(3)* %lsr.iv3, i32 1
+; OPT: %tmp4 = atomicrmw add ptr addrspace(3) %lsr.iv3, i32 undef seq_cst, align 4
+; OPT: %tmp7 = atomicrmw add ptr addrspace(3) %lsr.iv1, i32 undef seq_cst, align 4
+; OPT: %0 = atomicrmw add ptr addrspace(3) %lsr.iv1, i32 %tmp8 seq_cst, align 4
+; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4
 ; OPT: br i1 %exitcond
-define amdgpu_kernel void @test_local_atomicrmw_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(3)* noalias nocapture %arg0, i32 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+define amdgpu_kernel void @test_local_atomicrmw_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(3) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
   br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -35,12 +35,12 @@ bb:
 .lr.ph:                                           ; preds = %.lr.ph, %.lr.ph.preheader
   %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
   %tmp1 = add nuw nsw i32 %indvars.iv, 16383
-  %tmp3 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 %tmp1
-  %tmp4 = atomicrmw add i32 addrspace(3)* %tmp3, i32 undef seq_cst
-  %tmp6 = getelementptr inbounds i32, i32 addrspace(3)* %arg0, i32 %indvars.iv
-  %tmp7 = atomicrmw add i32 addrspace(3)* %tmp6, i32 undef seq_cst
+  %tmp3 = getelementptr inbounds i32, ptr addrspace(3) %arg1, i32 %tmp1
+  %tmp4 = atomicrmw add ptr addrspace(3) %tmp3, i32 undef seq_cst
+  %tmp6 = getelementptr inbounds i32, ptr addrspace(3) %arg0, i32 %indvars.iv
+  %tmp7 = atomicrmw add ptr addrspace(3) %tmp6, i32 undef seq_cst
   %tmp8 = add nsw i32 %tmp7, %tmp4
-  atomicrmw add i32 addrspace(3)* %tmp6, i32 %tmp8 seq_cst
+  atomicrmw add ptr addrspace(3) %tmp6, i32 %tmp8 seq_cst
   %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
   %exitcond = icmp eq i32 %indvars.iv.next, %n
   br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
@@ -49,15 +49,15 @@ bb:
 ; OPT-LABEL: test_local_cmpxchg_addressing_loop_uniform_index_max_offset_i32(
 
 ; OPT: .lr.ph.preheader:
-; OPT: %scevgep2 = getelementptr i32, i32 addrspace(3)*  %arg1, i32 16383
+; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3)  %arg1, i32 65532
 ; OPT: br label %.lr.ph
 ; OPT: .lr.ph:
-; OPT: %lsr.iv3 = phi i32 addrspace(3)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
-; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %uglygep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
 ; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
-; OPT: %tmp4 =  cmpxchg i32 addrspace(3)* %lsr.iv3, i32 undef, i32 undef seq_cst monotonic, align 4
-; OPT: %scevgep4 = getelementptr i32, i32 addrspace(3)* %lsr.iv3, i32 1
-define amdgpu_kernel void @test_local_cmpxchg_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(3)* noalias nocapture %arg0, i32 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+; OPT: %tmp4 =  cmpxchg ptr addrspace(3) %lsr.iv3, i32 undef, i32 undef seq_cst monotonic, align 4
+; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4
+define amdgpu_kernel void @test_local_cmpxchg_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(3) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
   br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -74,14 +74,14 @@ bb:
 .lr.ph:                                           ; preds = %.lr.ph, %.lr.ph.preheader
   %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
   %tmp1 = add nuw nsw i32 %indvars.iv, 16383
-  %tmp3 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 %tmp1
-  %tmp4 = cmpxchg i32 addrspace(3)* %tmp3, i32 undef, i32 undef seq_cst monotonic
+  %tmp3 = getelementptr inbounds i32, ptr addrspace(3) %arg1, i32 %tmp1
+  %tmp4 = cmpxchg ptr addrspace(3) %tmp3, i32 undef, i32 undef seq_cst monotonic
   %tmp4.0 = extractvalue { i32, i1 } %tmp4, 0
-  %tmp6 = getelementptr inbounds i32, i32 addrspace(3)* %arg0, i32 %indvars.iv
-  %tmp7 = cmpxchg i32 addrspace(3)* %tmp6, i32 undef, i32 undef seq_cst monotonic
+  %tmp6 = getelementptr inbounds i32, ptr addrspace(3) %arg0, i32 %indvars.iv
+  %tmp7 = cmpxchg ptr addrspace(3) %tmp6, i32 undef, i32 undef seq_cst monotonic
   %tmp7.0 = extractvalue { i32, i1 } %tmp7, 0
   %tmp8 = add nsw i32 %tmp7.0, %tmp4.0
-  atomicrmw add i32 addrspace(3)* %tmp6, i32 %tmp8 seq_cst
+  atomicrmw add ptr addrspace(3) %tmp6, i32 %tmp8 seq_cst
   %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
   %exitcond = icmp eq i32 %indvars.iv.next, %n
   br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
@@ -90,16 +90,16 @@ bb:
 ; OPT-LABEL: @test_local_atomicinc_addressing_loop_uniform_index_max_offset_i32(
 
 ; OPT: .lr.ph.preheader:
-; OPT: %scevgep2 = getelementptr i32, i32 addrspace(3)*  %arg1, i32 16383
+; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3)  %arg1, i32 65532
 ; OPT: br label %.lr.ph
 ; OPT: .lr.ph:
-; OPT: %lsr.iv3 = phi i32 addrspace(3)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
-; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %uglygep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
 ; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
-; OPT: %tmp4 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %lsr.iv3, i32 undef, i32 0, i32 0, i1 false)
-; OPT: %tmp7 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %lsr.iv1, i32 undef, i32 0, i32 0, i1 false)
-; OPT: %scevgep4 = getelementptr i32, i32 addrspace(3)* %lsr.iv3, i32 1
-define amdgpu_kernel void @test_local_atomicinc_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(3)* noalias nocapture %arg0, i32 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+; OPT: %tmp4 = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %lsr.iv3, i32 undef, i32 0, i32 0, i1 false)
+; OPT: %tmp7 = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %lsr.iv1, i32 undef, i32 0, i32 0, i1 false)
+; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4
+define amdgpu_kernel void @test_local_atomicinc_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(3) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
   br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -116,12 +116,12 @@ bb:
 .lr.ph:                                           ; preds = %.lr.ph, %.lr.ph.preheader
   %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
   %tmp1 = add nuw nsw i32 %indvars.iv, 16383
-  %tmp3 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 %tmp1
-  %tmp4 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %tmp3, i32 undef, i32 0, i32 0, i1 false)
-  %tmp6 = getelementptr inbounds i32, i32 addrspace(3)* %arg0, i32 %indvars.iv
-  %tmp7 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %tmp6, i32 undef, i32 0, i32 0, i1 false)
+  %tmp3 = getelementptr inbounds i32, ptr addrspace(3) %arg1, i32 %tmp1
+  %tmp4 = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %tmp3, i32 undef, i32 0, i32 0, i1 false)
+  %tmp6 = getelementptr inbounds i32, ptr addrspace(3) %arg0, i32 %indvars.iv
+  %tmp7 = call i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) %tmp6, i32 undef, i32 0, i32 0, i1 false)
   %tmp8 = add nsw i32 %tmp7, %tmp4
-  atomicrmw add i32 addrspace(3)* %tmp6, i32 %tmp8 seq_cst
+  atomicrmw add ptr addrspace(3) %tmp6, i32 %tmp8 seq_cst
   %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
   %exitcond = icmp eq i32 %indvars.iv.next, %n
   br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
@@ -129,16 +129,16 @@ bb:
 
 ; OPT-LABEL: @test_local_atomicdec_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
-; OPT: %scevgep2 = getelementptr i32, i32 addrspace(3)*  %arg1, i32 16383
+; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532
 ; OPT: br label %.lr.ph
 ; OPT: .lr.ph:
-; OPT: %lsr.iv3 = phi i32 addrspace(3)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
-; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi ptr addrspace(3) [ %uglygep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
 ; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
-; OPT: %tmp4 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %lsr.iv3, i32 undef, i32 0, i32 0, i1 false)
-; OPT: %tmp7 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %lsr.iv1, i32 undef, i32 0, i32 0, i1 false)
-; OPT: %scevgep4 = getelementptr i32, i32 addrspace(3)* %lsr.iv3, i32 1
-define amdgpu_kernel void @test_local_atomicdec_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(3)* noalias nocapture %arg0, i32 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+; OPT: %tmp4 = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %lsr.iv3, i32 undef, i32 0, i32 0, i1 false)
+; OPT: %tmp7 = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %lsr.iv1, i32 undef, i32 0, i32 0, i1 false)
+; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 4
+define amdgpu_kernel void @test_local_atomicdec_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(3) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
   br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -155,19 +155,19 @@ bb:
 .lr.ph:                                           ; preds = %.lr.ph, %.lr.ph.preheader
   %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
   %tmp1 = add nuw nsw i32 %indvars.iv, 16383
-  %tmp3 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 %tmp1
-  %tmp4 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %tmp3, i32 undef, i32 0, i32 0, i1 false)
-  %tmp6 = getelementptr inbounds i32, i32 addrspace(3)* %arg0, i32 %indvars.iv
-  %tmp7 = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %tmp6, i32 undef, i32 0, i32 0, i1 false)
+  %tmp3 = getelementptr inbounds i32, ptr addrspace(3) %arg1, i32 %tmp1
+  %tmp4 = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %tmp3, i32 undef, i32 0, i32 0, i1 false)
+  %tmp6 = getelementptr inbounds i32, ptr addrspace(3) %arg0, i32 %indvars.iv
+  %tmp7 = call i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) %tmp6, i32 undef, i32 0, i32 0, i1 false)
   %tmp8 = add nsw i32 %tmp7, %tmp4
-  atomicrmw add i32 addrspace(3)* %tmp6, i32 %tmp8 seq_cst
+  atomicrmw add ptr addrspace(3) %tmp6, i32 %tmp8 seq_cst
   %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
   %exitcond = icmp eq i32 %indvars.iv.next, %n
   br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
 }
 
-declare i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* nocapture, i32, i32, i32, i1) #1
-declare i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* nocapture, i32, i32, i32, i1) #1
+declare i32 @llvm.amdgcn.atomic.inc.i32.p3(ptr addrspace(3) nocapture, i32, i32, i32, i1) #1
+declare i32 @llvm.amdgcn.atomic.dec.i32.p3(ptr addrspace(3) nocapture, i32, i32, i32, i1) #1
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind argmemonly }
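
Note how the rescaling works in the checks above: the old GEPs counted
in i32 elements while the new i8 GEPs count in bytes, so the preheader
offset becomes 16383 * 4 = 65532 and the per-iteration step of one i32
becomes 4:

  %scevgep2 = getelementptr i32, i32 addrspace(3)* %arg1, i32 16383 ; 16383 i32s
  %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65532   ; same offset in bytes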

diff  --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
index 9271724757685..0a757419b59b5 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
@@ -1,4 +1,4 @@
-; RUN: opt -opaque-pointers=0 -S -mtriple=amdgcn-- -mcpu=bonaire -loop-reduce < %s | FileCheck -check-prefix=OPT %s
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=bonaire -loop-reduce < %s | FileCheck -check-prefix=OPT %s
 
 ; Test that loops with different maximum offsets for different address
 ; spaces are correctly handled.
@@ -7,13 +7,13 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 
 ; OPT-LABEL: @test_global_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
-; OPT: %scevgep2 = getelementptr i8, i8 addrspace(1)* %arg1, i64 4095
+; OPT: %uglygep2 = getelementptr i8, ptr addrspace(1) %arg1, i64 4095
 ; OPT: br label %.lr.ph
 ; OPT: {{^}}.lr.ph:
-; OPT: %lsr.iv3 = phi i8 addrspace(1)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
-; OPT: load i8, i8 addrspace(1)* %lsr.iv3, align 1
-; OPT: %scevgep4 = getelementptr i8, i8 addrspace(1)* %lsr.iv3, i64 1
-define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
+; OPT: %lsr.iv3 = phi ptr addrspace(1) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: load i8, ptr addrspace(1) %lsr.iv3, align 1
+; OPT: %uglygep4 = getelementptr i8, ptr addrspace(1) %lsr.iv3, i64 1
+define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(1) noalias nocapture %arg0, ptr addrspace(1) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
   br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -30,13 +30,13 @@ bb:
 .lr.ph:                                           ; preds = %.lr.ph, %.lr.ph.preheader
   %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
   %tmp1 = add nuw nsw i64 %indvars.iv, 4095
-  %tmp2 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %tmp1
-  %tmp3 = load i8, i8 addrspace(1)* %tmp2, align 1
+  %tmp2 = getelementptr inbounds i8, ptr addrspace(1) %arg1, i64 %tmp1
+  %tmp3 = load i8, ptr addrspace(1) %tmp2, align 1
   %tmp4 = sext i8 %tmp3 to i32
-  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %indvars.iv
-  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
+  %tmp5 = getelementptr inbounds i32, ptr addrspace(1) %arg0, i64 %indvars.iv
+  %tmp6 = load i32, ptr addrspace(1) %tmp5, align 4
   %tmp7 = add nsw i32 %tmp6, %tmp4
-  store i32 %tmp7, i32 addrspace(1)* %tmp5, align 4
+  store i32 %tmp7, ptr addrspace(1) %tmp5, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -45,13 +45,13 @@ bb:
 
 ; OPT-LABEL: @test_global_addressing_loop_uniform_index_max_offset_p1_i32(
 ; OPT: {{^}}.lr.ph.preheader:
-; OPT: %scevgep2 = getelementptr i8, i8 addrspace(1)* %arg1, i64 4096
+; OPT: %uglygep2 = getelementptr i8, ptr addrspace(1) %arg1, i64 4096
 ; OPT: br label %.lr.ph
 
 ; OPT: {{^}}.lr.ph:
-; OPT: %lsr.iv3 = phi i8 addrspace(1)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
-; OPT: %scevgep4 = getelementptr i8, i8 addrspace(1)* %lsr.iv3, i64 1
-define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
+; OPT: %lsr.iv3 = phi ptr addrspace(1) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: %uglygep4 = getelementptr i8, ptr addrspace(1) %lsr.iv3, i64 1
+define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_p1_i32(ptr addrspace(1) noalias nocapture %arg0, ptr addrspace(1) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
   br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -68,13 +68,13 @@ bb:
 .lr.ph:                                           ; preds = %.lr.ph, %.lr.ph.preheader
   %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
   %tmp1 = add nuw nsw i64 %indvars.iv, 4096
-  %tmp2 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %tmp1
-  %tmp3 = load i8, i8 addrspace(1)* %tmp2, align 1
+  %tmp2 = getelementptr inbounds i8, ptr addrspace(1) %arg1, i64 %tmp1
+  %tmp3 = load i8, ptr addrspace(1) %tmp2, align 1
   %tmp4 = sext i8 %tmp3 to i32
-  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %indvars.iv
-  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
+  %tmp5 = getelementptr inbounds i32, ptr addrspace(1) %arg0, i64 %indvars.iv
+  %tmp6 = load i32, ptr addrspace(1) %tmp5, align 4
   %tmp7 = add nsw i32 %tmp6, %tmp4
-  store i32 %tmp7, i32 addrspace(1)* %tmp5, align 4
+  store i32 %tmp7, ptr addrspace(1) %tmp5, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -83,13 +83,13 @@ bb:
 
 ; OPT-LABEL: @test_local_addressing_loop_uniform_index_max_offset_i32(
 ; OPT: .lr.ph.preheader:
-; OPT: %scevgep2 = getelementptr i8, i8 addrspace(3)* %arg1, i32 65535
+; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65535
 ; OPT: br label %.lr.ph
 ; OPT: {{^}}.lr.ph
-; OPT: %lsr.iv3 = phi i8 addrspace(3)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
-; OPT: %tmp4 = load i8, i8 addrspace(3)* %lsr.iv3, align 1
-; OPT: %scevgep4 = getelementptr i8, i8 addrspace(3)* %lsr.iv3, i32 1
-define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: %tmp4 = load i8, ptr addrspace(3) %lsr.iv3, align 1
+; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 1
+define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_i32(ptr addrspace(1) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
   br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -107,13 +107,13 @@ bb:
   %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
   %tmp1 = add nuw nsw i64 %indvars.iv, 65535
   %tmp2 = trunc i64 %tmp1 to i32
-  %tmp3 = getelementptr inbounds i8, i8 addrspace(3)* %arg1, i32 %tmp2
-  %tmp4 = load i8, i8 addrspace(3)* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr addrspace(3) %arg1, i32 %tmp2
+  %tmp4 = load i8, ptr addrspace(3) %tmp3, align 1
   %tmp5 = sext i8 %tmp4 to i32
-  %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %indvars.iv
-  %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
+  %tmp6 = getelementptr inbounds i32, ptr addrspace(1) %arg0, i64 %indvars.iv
+  %tmp7 = load i32, ptr addrspace(1) %tmp6, align 4
   %tmp8 = add nsw i32 %tmp7, %tmp5
-  store i32 %tmp8, i32 addrspace(1)* %tmp6, align 4
+  store i32 %tmp8, ptr addrspace(1) %tmp6, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -122,13 +122,13 @@ bb:
 
 ; OPT-LABEL: @test_local_addressing_loop_uniform_index_max_offset_p1_i32(
 ; OPT: {{^}}.lr.ph.preheader:
-; OPT: %scevgep2 = getelementptr i8, i8 addrspace(3)* %arg1, i32 65536
+; OPT: %uglygep2 = getelementptr i8, ptr addrspace(3) %arg1, i32 65536
 ; OPT: br label %.lr.ph
 
 ; OPT: {{^}}.lr.ph:
-; OPT: %lsr.iv3 = phi i8 addrspace(3)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
-; OPT: %scevgep4 = getelementptr i8, i8 addrspace(3)* %lsr.iv3, i32 1
-define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+; OPT: %lsr.iv3 = phi ptr addrspace(3) [ %uglygep4, %.lr.ph ], [ %uglygep2, %.lr.ph.preheader ]
+; OPT: %uglygep4 = getelementptr i8, ptr addrspace(3) %lsr.iv3, i32 1
+define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_p1_i32(ptr addrspace(1) noalias nocapture %arg0, ptr addrspace(3) noalias nocapture readonly %arg1, i32 %n) #0 {
 bb:
   %tmp = icmp sgt i32 %n, 0
   br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -146,13 +146,13 @@ bb:
   %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
   %tmp1 = add nuw nsw i64 %indvars.iv, 65536
   %tmp2 = trunc i64 %tmp1 to i32
-  %tmp3 = getelementptr inbounds i8, i8 addrspace(3)* %arg1, i32 %tmp2
-  %tmp4 = load i8, i8 addrspace(3)* %tmp3, align 1
+  %tmp3 = getelementptr inbounds i8, ptr addrspace(3) %arg1, i32 %tmp2
+  %tmp4 = load i8, ptr addrspace(3) %tmp3, align 1
   %tmp5 = sext i8 %tmp4 to i32
-  %tmp6 = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i64 %indvars.iv
-  %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
+  %tmp6 = getelementptr inbounds i32, ptr addrspace(1) %arg0, i64 %indvars.iv
+  %tmp7 = load i32, ptr addrspace(1) %tmp6, align 4
   %tmp8 = add nsw i32 %tmp7, %tmp5
-  store i32 %tmp8, i32 addrspace(1)* %tmp6, align 4
+  store i32 %tmp8, ptr addrspace(1) %tmp6, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, %n
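
In this file the strided accesses are already i8-typed, so element and
byte counts coincide: the offsets in the checks (4095, 4096, 65535,
65536) are unchanged, and only the pointer spelling and the %scevgep ->
%uglygep names move, e.g.:

  ; before: %scevgep2 = getelementptr i8, i8 addrspace(1)* %arg1, i64 4095
  ; after:  %uglygep2 = getelementptr i8, ptr addrspace(1) %arg1, i64 4095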

diff  --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-invalid-ptr-extend.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-invalid-ptr-extend.ll
index d51a1943690be..c17ca9643a2ab 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-invalid-ptr-extend.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-invalid-ptr-extend.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -opaque-pointers=0 -march=amdgcn -loop-reduce -S < %s | FileCheck %s
+; RUN: opt -march=amdgcn -loop-reduce -S < %s | FileCheck %s
 ; REQUIRES: asserts
 
 ; Test that LSR does not attempt to extend a pointer type to an integer type,
@@ -18,18 +18,18 @@ define amdgpu_kernel void @scaledregtest() local_unnamed_addr {
 ; CHECK:       loopexit:
 ; CHECK-NEXT:    br label [[FOR_BODY_1:%.*]]
 ; CHECK:       for.body.1:
-; CHECK-NEXT:    [[LSR_IV5:%.*]] = phi i8* addrspace(5)* [ [[SCEVGEP6:%.*]], [[FOR_BODY_1]] ], [ [[SCEVGEP11:%.*]], [[LOOPEXIT:%.*]] ]
-; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi i8** [ [[SCEVGEP2:%.*]], [[FOR_BODY_1]] ], [ [[SCEVGEP13:%.*]], [[LOOPEXIT]] ]
-; CHECK-NEXT:    [[TMP0:%.*]] = load i8*, i8* addrspace(5)* [[LSR_IV5]], align 8
-; CHECK-NEXT:    store i8* [[TMP0]], i8** [[LSR_IV1]], align 8
-; CHECK-NEXT:    [[SCEVGEP2]] = getelementptr i8*, i8** [[LSR_IV1]], i64 1
-; CHECK-NEXT:    [[SCEVGEP6]] = getelementptr i8*, i8* addrspace(5)* [[LSR_IV5]], i32 1
+; CHECK-NEXT:    [[LSR_IV5:%.*]] = phi ptr addrspace(5) [ [[UGLYGEP6:%.*]], [[FOR_BODY_1]] ], [ [[UGLYGEP11:%.*]], [[LOOPEXIT:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi ptr [ [[UGLYGEP2:%.*]], [[FOR_BODY_1]] ], [ [[UGLYGEP13:%.*]], [[LOOPEXIT]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr addrspace(5) [[LSR_IV5]], align 8
+; CHECK-NEXT:    store ptr [[TMP0]], ptr [[LSR_IV1]], align 8
+; CHECK-NEXT:    [[UGLYGEP2]] = getelementptr i8, ptr [[LSR_IV1]], i64 8
+; CHECK-NEXT:    [[UGLYGEP6]] = getelementptr i8, ptr addrspace(5) [[LSR_IV5]], i32 8
 ; CHECK-NEXT:    br label [[FOR_BODY_1]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[LSR_IV12:%.*]] = phi i8** [ [[SCEVGEP13]], [[FOR_BODY]] ], [ null, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[LSR_IV10:%.*]] = phi i8* addrspace(5)* [ [[SCEVGEP11]], [[FOR_BODY]] ], [ null, [[ENTRY]] ]
-; CHECK-NEXT:    [[SCEVGEP11]] = getelementptr i8*, i8* addrspace(5)* [[LSR_IV10]], i32 8
-; CHECK-NEXT:    [[SCEVGEP13]] = getelementptr i8*, i8** [[LSR_IV12]], i64 8
+; CHECK-NEXT:    [[LSR_IV12:%.*]] = phi ptr [ [[UGLYGEP13]], [[FOR_BODY]] ], [ null, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV10:%.*]] = phi ptr addrspace(5) [ [[UGLYGEP11]], [[FOR_BODY]] ], [ null, [[ENTRY]] ]
+; CHECK-NEXT:    [[UGLYGEP11]] = getelementptr i8, ptr addrspace(5) [[LSR_IV10]], i32 64
+; CHECK-NEXT:    [[UGLYGEP13]] = getelementptr i8, ptr [[LSR_IV12]], i64 64
 ; CHECK-NEXT:    br i1 false, label [[LOOPEXIT]], label [[FOR_BODY]]
 ;
 entry:
@@ -43,10 +43,10 @@ for.body.1:
   %conv.1 = phi i64 [ %conv.2, %for.body.1 ], [ %conv, %loopexit ]
   %I.1 = phi i32 [ %inc.1, %for.body.1 ], [ %inc, %loopexit ]
   %idxprom = trunc i64 %conv.1 to i32
-  %arrayidx = getelementptr inbounds i8*, i8* addrspace(5)* null, i32 %idxprom
-  %0 = load i8*, i8* addrspace(5)* %arrayidx, align 8
-  %arrayidx.1 = getelementptr inbounds i8*, i8** null, i64 %conv.1
-  store i8* %0, i8** %arrayidx.1, align 8
+  %arrayidx = getelementptr inbounds ptr, ptr addrspace(5) null, i32 %idxprom
+  %0 = load ptr, ptr addrspace(5) %arrayidx, align 8
+  %arrayidx.1 = getelementptr inbounds ptr, ptr null, i64 %conv.1
+  store ptr %0, ptr %arrayidx.1, align 8
   %inc.1 = add nuw nsw i32 %I.1, 1
   %conv.2 = zext i32 %inc.1 to i64
   br label %for.body.1
@@ -63,26 +63,22 @@ define protected amdgpu_kernel void @baseregtest(i32 %n, i32 %lda) local_unnamed
 ; CHECK-NEXT:    br i1 undef, label [[EXIT:%.*]], label [[IF_END:%.*]]
 ; CHECK:       if.end:
 ; CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @foo()
-; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr [1024 x double], [1024 x double] addrspace(3)* @gVar, i32 0, i32 [[TMP0]]
-; CHECK-NEXT:    [[SCEVGEP1:%.*]] = bitcast double addrspace(3)* [[SCEVGEP]] to [1024 x double] addrspace(3)*
-; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[N:%.*]], 3
-; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP0]] to i64
-; CHECK-NEXT:    [[SCEVGEP5:%.*]] = getelementptr double, double addrspace(1)* null, i64 [[TMP2]]
-; CHECK-NEXT:    [[TMP3:%.*]] = sext i32 [[LDA:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 3
+; CHECK-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, ptr addrspace(3) @gVar, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[N:%.*]], 3
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i32 [[TMP0]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = shl nsw i64 [[TMP3]], 3
+; CHECK-NEXT:    [[UGLYGEP2:%.*]] = getelementptr i8, ptr addrspace(1) null, i64 [[TMP4]]
+; CHECK-NEXT:    [[TMP5:%.*]] = sext i32 [[LDA:%.*]] to i64
+; CHECK-NEXT:    [[TMP6:%.*]] = shl nsw i64 [[TMP5]], 3
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[LSR_IV6:%.*]] = phi double addrspace(1)* [ [[TMP7:%.*]], [[FOR_BODY]] ], [ [[SCEVGEP5]], [[IF_END]] ]
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi [1024 x double] addrspace(3)* [ [[TMP6:%.*]], [[FOR_BODY]] ], [ [[SCEVGEP1]], [[IF_END]] ]
-; CHECK-NEXT:    [[LSR_IV2:%.*]] = bitcast [1024 x double] addrspace(3)* [[LSR_IV]] to i1 addrspace(3)*
-; CHECK-NEXT:    [[LSR_IV4:%.*]] = bitcast [1024 x double] addrspace(3)* [[LSR_IV]] to double addrspace(3)*
-; CHECK-NEXT:    [[LSR_IV67:%.*]] = bitcast double addrspace(1)* [[LSR_IV6]] to i1 addrspace(1)*
-; CHECK-NEXT:    [[TMP5:%.*]] = load double, double addrspace(1)* [[LSR_IV6]], align 8
-; CHECK-NEXT:    store double [[TMP5]], double addrspace(3)* [[LSR_IV4]], align 8
-; CHECK-NEXT:    [[SCEVGEP3:%.*]] = getelementptr i1, i1 addrspace(3)* [[LSR_IV2]], i32 [[TMP1]]
-; CHECK-NEXT:    [[TMP6]] = bitcast i1 addrspace(3)* [[SCEVGEP3]] to [1024 x double] addrspace(3)*
-; CHECK-NEXT:    [[SCEVGEP8:%.*]] = getelementptr i1, i1 addrspace(1)* [[LSR_IV67]], i64 [[TMP4]]
-; CHECK-NEXT:    [[TMP7]] = bitcast i1 addrspace(1)* [[SCEVGEP8]] to double addrspace(1)*
+; CHECK-NEXT:    [[LSR_IV3:%.*]] = phi ptr addrspace(1) [ [[UGLYGEP4:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP2]], [[IF_END]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi ptr addrspace(3) [ [[UGLYGEP1:%.*]], [[FOR_BODY]] ], [ [[UGLYGEP]], [[IF_END]] ]
+; CHECK-NEXT:    [[TMP7:%.*]] = load double, ptr addrspace(1) [[LSR_IV3]], align 8
+; CHECK-NEXT:    store double [[TMP7]], ptr addrspace(3) [[LSR_IV]], align 8
+; CHECK-NEXT:    [[UGLYGEP1]] = getelementptr i8, ptr addrspace(3) [[LSR_IV]], i32 [[TMP2]]
+; CHECK-NEXT:    [[UGLYGEP4]] = getelementptr i8, ptr addrspace(1) [[LSR_IV3]], i64 [[TMP6]]
 ; CHECK-NEXT:    br label [[FOR_BODY]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
@@ -99,12 +95,12 @@ for.body:
   %mul1 = mul nsw i32 %i, %lda
   %add1 = add nsw i32 %mul1, %0
   %idxprom = sext i32 %add1 to i64
-  %arrayidx = getelementptr inbounds double, double addrspace(1)* null, i64 %idxprom
-  %1 = load double, double addrspace(1)* %arrayidx, align 8
+  %arrayidx = getelementptr inbounds double, ptr addrspace(1) null, i64 %idxprom
+  %1 = load double, ptr addrspace(1) %arrayidx, align 8
   %mul2 = mul nsw i32 %i, %n
   %add2 = add nsw i32 %mul2, %0
-  %arrayidx9110 = getelementptr inbounds [1024 x double], [1024 x double] addrspace(3)* @gVar, i32 0, i32 %add2
-  store double %1, double addrspace(3)* %arrayidx9110, align 8
+  %arrayidx9110 = getelementptr inbounds [1024 x double], ptr addrspace(3) @gVar, i32 0, i32 %add2
+  store double %1, ptr addrspace(3) %arrayidx9110, align 8
   %inc = add nuw nsw i32 %i, 1
   br label %for.body
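
Here the induction variables step over 64-bit pointers, so the element
strides scale by 8 in the byte-based form: a step of one i8* becomes 8
bytes and a step of eight becomes 64. Roughly:

  ; before: %scevgep6 = getelementptr i8*, i8* addrspace(5)* %lsr.iv5, i32 1
  ; after:  %uglygep6 = getelementptr i8, ptr addrspace(5) %lsr.iv5, i32 8

In @baseregtest the [1024 x double] bitcast chains disappear outright,
since an opaque ptr needs no casts between element views.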
 

diff  --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
index d14cfc7c0d911..2006ac0e6de6d 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
@@ -1,4 +1,4 @@
-; RUN: opt -opaque-pointers=0 -S -mtriple=amdgcn-amd-amdhsa -loop-reduce %s | FileCheck %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -loop-reduce %s | FileCheck %s
 
 ; Test for assert resulting from inconsistent isLegalAddressingMode
 ; answers when the address space was dropped from the query.
@@ -8,20 +8,19 @@ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:3
 %0 = type { i32, double, i32, float }
 
 ; CHECK-LABEL: @lsr_crash_preserve_addrspace_unknown_type(
-; CHECK: %tmp4 = bitcast %0 addrspace(3)* %tmp to double addrspace(3)*
-; CHECK: %scevgep5 = getelementptr double, double addrspace(3)* %tmp4, i32 1
-; CHECK: load double, double addrspace(3)* %scevgep5
+; CHECK: %uglygep1 = getelementptr i8, ptr addrspace(3) %tmp, i32 8
+; CHECK: load double, ptr addrspace(3) %uglygep1
 
-; CHECK: %scevgep = getelementptr i32, i32 addrspace(3)* %tmp1, i32 4
-; CHECK:%tmp14 = load i32, i32 addrspace(3)* %scevgep
+; CHECK: %uglygep = getelementptr i8, ptr addrspace(3) %tmp, i32 16
+; CHECK: %tmp14 = load i32, ptr addrspace(3) %uglygep
 define amdgpu_kernel void @lsr_crash_preserve_addrspace_unknown_type() #0 {
 bb:
   br label %bb1
 
 bb1:                                              ; preds = %bb17, %bb
-  %tmp = phi %0 addrspace(3)* [ undef, %bb ], [ %tmp18, %bb17 ]
-  %tmp2 = getelementptr inbounds %0, %0 addrspace(3)* %tmp, i64 0, i32 1
-  %tmp3 = load double, double addrspace(3)* %tmp2, align 8
+  %tmp = phi ptr addrspace(3) [ undef, %bb ], [ %tmp18, %bb17 ]
+  %tmp2 = getelementptr inbounds %0, ptr addrspace(3) %tmp, i64 0, i32 1
+  %tmp3 = load double, ptr addrspace(3) %tmp2, align 8
   br label %bb4
 
 bb4:                                              ; preds = %bb1
@@ -31,14 +30,13 @@ bb5:                                              ; preds = %bb4
   unreachable
 
 bb8:                                              ; preds = %bb4
-  %tmp9 = getelementptr inbounds %0, %0 addrspace(3)* %tmp, i64 0, i32 0
-  %tmp10 = load i32, i32 addrspace(3)* %tmp9, align 4
+  %tmp10 = load i32, ptr addrspace(3) %tmp, align 4
   %tmp11 = icmp eq i32 0, %tmp10
   br i1 %tmp11, label %bb12, label %bb17
 
 bb12:                                             ; preds = %bb8
-  %tmp13 = getelementptr inbounds %0, %0 addrspace(3)* %tmp, i64 0, i32 2
-  %tmp14 = load i32, i32 addrspace(3)* %tmp13, align 4
+  %tmp13 = getelementptr inbounds %0, ptr addrspace(3) %tmp, i64 0, i32 2
+  %tmp14 = load i32, ptr addrspace(3) %tmp13, align 4
   %tmp15 = icmp eq i32 0, %tmp14
   br i1 %tmp15, label %bb16, label %bb17
 
@@ -46,37 +44,38 @@ bb16:                                             ; preds = %bb12
   unreachable
 
 bb17:                                             ; preds = %bb12, %bb8
-  %tmp18 = getelementptr inbounds %0, %0 addrspace(3)* %tmp, i64 2
+  %tmp18 = getelementptr inbounds %0, ptr addrspace(3) %tmp, i64 2
   br label %bb1
 }
 
 ; CHECK-LABEL: @lsr_crash_preserve_addrspace_unknown_type2(
-; CHECK: %scevgep3 = getelementptr i8, i8 addrspace(5)* %array, i32 %j
-; CHECK: %scevgep2 = getelementptr i8, i8 addrspace(5)* %array, i32 %j
-; CHECK: %n8 = load i8, i8 addrspace(5)* %scevgep2, align 4
-; CHECK: call void @llvm.memcpy.p5i8.p3i8.i64(i8 addrspace(5)* %scevgep3, i8 addrspace(3)* %scevgep4, i64 42, i1 false)
-; CHECK: call void @llvm.memmove.p5i8.p3i8.i64(i8 addrspace(5)* %scevgep3, i8 addrspace(3)* %scevgep4, i64 42, i1 false)
-; CHECK: call void @llvm.memset.p5i8.i64(i8 addrspace(5)* %scevgep3, i8 42, i64 42, i1 false)
-define void @lsr_crash_preserve_addrspace_unknown_type2(i8 addrspace(5)* %array, i8 addrspace(3)* %array2) {
+; CHECK: %idx = getelementptr inbounds i8, ptr addrspace(5) %array, i32 %j
+; CHECK: %idx1 = getelementptr inbounds i8, ptr addrspace(3) %array2, i32 %j
+; CHECK: %t = getelementptr inbounds i8, ptr addrspace(5) %array, i32 %j
+; CHECK: %n8 = load i8, ptr addrspace(5) %t, align 4
+; CHECK: call void @llvm.memcpy.p5.p3.i64(ptr addrspace(5) %idx, ptr addrspace(3) %idx1, i64 42, i1 false)
+; CHECK: call void @llvm.memmove.p5.p3.i64(ptr addrspace(5) %idx, ptr addrspace(3) %idx1, i64 42, i1 false)
+; CHECK: call void @llvm.memset.p5.i64(ptr addrspace(5) %idx, i8 42, i64 42, i1 false)
+define void @lsr_crash_preserve_addrspace_unknown_type2(ptr addrspace(5) %array, ptr addrspace(3) %array2) {
 entry:
   br label %for.body
 
 for.body:                                         ; preds = %entry, %for.inc
   %j = phi i32 [ %add, %for.inc ], [ 0, %entry ]
-  %idx = getelementptr inbounds i8, i8 addrspace(5)* %array, i32 %j
-  %idx1 = getelementptr inbounds i8, i8 addrspace(3)* %array2, i32 %j
-  %t = getelementptr inbounds i8, i8 addrspace(5)* %array, i32 %j
-  %n8 = load i8, i8 addrspace(5)* %t, align 4
-  %n7 = getelementptr inbounds i8, i8 addrspace(5)* %t, i32 42
-  %n9 = load i8, i8 addrspace(5)* %n7, align 4
+  %idx = getelementptr inbounds i8, ptr addrspace(5) %array, i32 %j
+  %idx1 = getelementptr inbounds i8, ptr addrspace(3) %array2, i32 %j
+  %t = getelementptr inbounds i8, ptr addrspace(5) %array, i32 %j
+  %n8 = load i8, ptr addrspace(5) %t, align 4
+  %n7 = getelementptr inbounds i8, ptr addrspace(5) %t, i32 42
+  %n9 = load i8, ptr addrspace(5) %n7, align 4
   %cmp = icmp sgt i32 %j, 42
   %add = add nuw nsw i32 %j, 1
   br i1 %cmp, label %if.then17, label %for.inc
 
 if.then17:                                        ; preds = %for.body
-  call void @llvm.memcpy.p5i8.p5i8.i64(i8 addrspace(5)* %idx, i8 addrspace(3)* %idx1, i64 42, i1 false)
-  call void @llvm.memmove.p5i8.p5i8.i64(i8 addrspace(5)* %idx, i8 addrspace(3)* %idx1, i64 42, i1 false)
-  call void @llvm.memset.p5i8.i64(i8 addrspace(5)* %idx, i8 42, i64 42, i1 false)
+  call void @llvm.memcpy.p5.p5.i64(ptr addrspace(5) %idx, ptr addrspace(3) %idx1, i64 42, i1 false)
+  call void @llvm.memmove.p5.p5.i64(ptr addrspace(5) %idx, ptr addrspace(3) %idx1, i64 42, i1 false)
+  call void @llvm.memset.p5.i64(ptr addrspace(5) %idx, i8 42, i64 42, i1 false)
   br label %for.inc
 
 for.inc:                                          ; preds = %for.body, %if.then17
@@ -87,9 +86,9 @@ end:                                              ; preds = %for.inc
   ret void
 }
 
-declare void @llvm.memcpy.p5i8.p5i8.i64(i8 addrspace(5)*, i8 addrspace(3)*, i64, i1)
-declare void @llvm.memmove.p5i8.p5i8.i64(i8 addrspace(5)*, i8 addrspace(3)*, i64, i1)
-declare void @llvm.memset.p5i8.i64(i8 addrspace(5)*, i8, i64, i1)
+declare void @llvm.memcpy.p5.p5.i64(ptr addrspace(5), ptr addrspace(3), i64, i1)
+declare void @llvm.memmove.p5.p5.i64(ptr addrspace(5), ptr addrspace(3), i64, i1)
+declare void @llvm.memset.p5.i64(ptr addrspace(5), i8, i64, i1)
 
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }
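
With opaque pointers LSR addresses the fields of %0 = type { i32,
double, i32, float } by raw byte offset: with the double 8-byte aligned,
field 1 sits at byte 8 and field 2 at byte 16, which is what the new
checks encode:

  %uglygep1 = getelementptr i8, ptr addrspace(3) %tmp, i32 8  ; field 1 (double)
  %uglygep = getelementptr i8, ptr addrspace(3) %tmp, i32 16  ; field 2 (i32)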
