[llvm] a4f8705 - [LAA] Precommit test with loops where indices are loaded in each iter.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Sat Jul 13 13:27:51 PDT 2024


Author: Florian Hahn
Date: 2024-07-13T21:25:32+01:00
New Revision: a4f8705b05bef13d09e243cc3ebaf4ec9f5355b9

URL: https://github.com/llvm/llvm-project/commit/a4f8705b05bef13d09e243cc3ebaf4ec9f5355b9
DIFF: https://github.com/llvm/llvm-project/commit/a4f8705b05bef13d09e243cc3ebaf4ec9f5355b9.diff

LOG: [LAA] Precommit test with loops where indices are loaded in each iter.

Add tests which are not safe to vectorize because %indices are loaded in
the loop and the same indices could be loaded in later iterations.

Tests for https://github.com/llvm/llvm-project/issues/87189.

Added: 
    llvm/test/Analysis/LoopAccessAnalysis/load-store-index-loaded-in-loop.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/load-store-index-loaded-in-loop.ll b/llvm/test/Analysis/LoopAccessAnalysis/load-store-index-loaded-in-loop.ll
new file mode 100644
index 0000000000000..50eec242de37b
--- /dev/null
+++ b/llvm/test/Analysis/LoopAccessAnalysis/load-store-index-loaded-in-loop.ll
@@ -0,0 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='print<access-info>' -disable-output %s 2>&1 | FileCheck %s
+
+; Test case for https://github.com/llvm/llvm-project/issues/87189.
+; It is not safe to vectorize because %indices are loaded in the loop and the
+; same indices could be loaded in later iterations.
+; FIXME: currently this is incorrectly considered safe for vectorization with
+; runtime checks
+define void @B_indices_loaded_in_loop_A_stored(ptr %A, ptr noalias %B, i64 %N) {
+; CHECK-LABEL: 'B_indices_loaded_in_loop_A_stored'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Memory dependences are safe with run-time checks
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Check 0:
+; CHECK-NEXT:        Comparing group ([[GRP1:0x[0-9a-f]+]]):
+; CHECK-NEXT:          %gep.A.1 = getelementptr inbounds i32, ptr %A, i64 %iv
+; CHECK-NEXT:        Against group ([[GRP2:0x[0-9a-f]+]]):
+; CHECK-NEXT:          %gep.A.0 = getelementptr inbounds i8, ptr %A, i64 %iv
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-NEXT:        Group [[GRP1]]:
+; CHECK-NEXT:          (Low: %A High: ((4 * %N) + %A))
+; CHECK-NEXT:            Member: {%A,+,4}<nuw><%loop>
+; CHECK-NEXT:        Group [[GRP2]]:
+; CHECK-NEXT:          (Low: %A High: (%N + %A))
+; CHECK-NEXT:            Member: {%A,+,1}<nuw><%loop>
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %gep.A.0 = getelementptr inbounds i8, ptr %A, i64 %iv
+  %indices = load i8, ptr %gep.A.0, align 1
+  %indices.ext = zext i8 %indices to i64
+  %gep.B = getelementptr inbounds i32, ptr %B, i64 %indices.ext
+  %l = load i32, ptr %gep.B, align 4
+  %inc = add i32 %l, 1
+  store i32 %inc, ptr %gep.B, align 4
+  %gep.A.1 = getelementptr inbounds i32, ptr %A, i64 %iv
+  store i32 %l, ptr %gep.A.1, align 4
+  %iv.next = add nuw nsw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, %N
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+; It is not safe to vectorize because %indices are loaded in the loop and the
+; same indices could be loaded in later iterations.
+define void @B_indices_loaded_in_loop_A_not_stored(ptr %A, ptr noalias %B, i64 %N) {
+; CHECK-LABEL: 'B_indices_loaded_in_loop_A_not_stored'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.B, align 4 ->
+; CHECK-NEXT:            store i32 %inc, ptr %gep.B, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %gep.A.0 = getelementptr inbounds i8, ptr %A, i64 %iv
+  %indices = load i8, ptr %gep.A.0, align 1
+  %indices.ext = zext i8 %indices to i64
+  %gep.B = getelementptr inbounds i32, ptr %B, i64 %indices.ext
+  %l = load i32, ptr %gep.B, align 4
+  %inc = add i32 %l, 1
+  store i32 %inc, ptr %gep.B, align 4
+  %iv.next = add nuw nsw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, %N
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}


        


More information about the llvm-commits mailing list