[llvm] 4199f80 - [LAA] Adjust test from a4f8705b05 so RT checks aren't always false.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 16 13:57:21 PDT 2024


Author: Florian Hahn
Date: 2024-07-16T21:56:57+01:00
New Revision: 4199f80df58624bbd8491688b4ab6aefdfec2726

URL: https://github.com/llvm/llvm-project/commit/4199f80df58624bbd8491688b4ab6aefdfec2726
DIFF: https://github.com/llvm/llvm-project/commit/4199f80df58624bbd8491688b4ab6aefdfec2726.diff

LOG: [LAA] Adjust test from a4f8705b05 so RT checks aren't always false.

Updated @B_indices_loaded_in_loop_A_stored to use a different offset
for one of the accesses we create runtime checks for; the original
version had a runtime check that was always true as the accesses always
overlapped.

Added: 
    

Modified: 
    llvm/test/Analysis/LoopAccessAnalysis/load-store-index-loaded-in-loop.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/load-store-index-loaded-in-loop.ll b/llvm/test/Analysis/LoopAccessAnalysis/load-store-index-loaded-in-loop.ll
index 50eec242de37b..2e61a28039846 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/load-store-index-loaded-in-loop.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/load-store-index-loaded-in-loop.ll
@@ -6,7 +6,7 @@
 ; same indices could be loaded in later iterations.
 ; FIXME: currently this is incorrectly considered safe for vectorization with
 ; runtime checks
-define void @B_indices_loaded_in_loop_A_stored(ptr %A, ptr noalias %B, i64 %N) {
+define void @B_indices_loaded_in_loop_A_stored(ptr %A, ptr noalias %B, i64 %N, i64 %off) {
 ; CHECK-LABEL: 'B_indices_loaded_in_loop_A_stored'
 ; CHECK-NEXT:    loop:
 ; CHECK-NEXT:      Memory dependences are safe with run-time checks
@@ -14,16 +14,16 @@ define void @B_indices_loaded_in_loop_A_stored(ptr %A, ptr noalias %B, i64 %N) {
 ; CHECK-NEXT:      Run-time memory checks:
 ; CHECK-NEXT:      Check 0:
 ; CHECK-NEXT:        Comparing group ([[GRP1:0x[0-9a-f]+]]):
-; CHECK-NEXT:          %gep.A.1 = getelementptr inbounds i32, ptr %A, i64 %iv
+; CHECK-NEXT:          %gep.C = getelementptr inbounds i32, ptr %A, i64 %iv
 ; CHECK-NEXT:        Against group ([[GRP2:0x[0-9a-f]+]]):
-; CHECK-NEXT:          %gep.A.0 = getelementptr inbounds i8, ptr %A, i64 %iv
+; CHECK-NEXT:          %gep.A = getelementptr inbounds i8, ptr %A, i64 %iv.off
 ; CHECK-NEXT:      Grouped accesses:
 ; CHECK-NEXT:        Group [[GRP1]]:
 ; CHECK-NEXT:          (Low: %A High: ((4 * %N) + %A))
 ; CHECK-NEXT:            Member: {%A,+,4}<nuw><%loop>
 ; CHECK-NEXT:        Group [[GRP2]]:
-; CHECK-NEXT:          (Low: %A High: (%N + %A))
-; CHECK-NEXT:            Member: {%A,+,1}<nuw><%loop>
+; CHECK-NEXT:          (Low: (%off + %A) High: (%N + %off + %A))
+; CHECK-NEXT:            Member: {(%off + %A),+,1}<nw><%loop>
 ; CHECK-EMPTY:
 ; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
 ; CHECK-NEXT:      SCEV assumptions:
@@ -35,15 +35,16 @@ entry:
 
 loop:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
-  %gep.A.0 = getelementptr inbounds i8, ptr %A, i64 %iv
-  %indices = load i8, ptr %gep.A.0, align 1
+  %iv.off = add nuw nsw i64 %iv, %off
+  %gep.A = getelementptr inbounds i8, ptr %A, i64 %iv.off
+  %indices = load i8, ptr %gep.A, align 1
   %indices.ext = zext i8 %indices to i64
   %gep.B = getelementptr inbounds i32, ptr %B, i64 %indices.ext
   %l = load i32, ptr %gep.B, align 4
   %inc = add i32 %l, 1
   store i32 %inc, ptr %gep.B, align 4
-  %gep.A.1 = getelementptr inbounds i32, ptr %A, i64 %iv
-  store i32 %l, ptr %gep.A.1, align 4
+  %gep.C = getelementptr inbounds i32, ptr %A, i64 %iv
+  store i32 %l, ptr %gep.C, align 4
   %iv.next = add nuw nsw i64 %iv, 1
   %ec = icmp eq i64 %iv.next, %N
   br i1 %ec, label %exit, label %loop
@@ -77,8 +78,8 @@ entry:
 
 loop:
   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
-  %gep.A.0 = getelementptr inbounds i8, ptr %A, i64 %iv
-  %indices = load i8, ptr %gep.A.0, align 1
+  %gep.A = getelementptr inbounds i8, ptr %A, i64 %iv
+  %indices = load i8, ptr %gep.A, align 1
   %indices.ext = zext i8 %indices to i64
   %gep.B = getelementptr inbounds i32, ptr %B, i64 %indices.ext
   %l = load i32, ptr %gep.B, align 4


        


More information about the llvm-commits mailing list