[llvm] 972353f - [LAA] Add tests where results can be improved using loop guards.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 4 03:28:49 PDT 2024


Author: Florian Hahn
Date: 2024-10-04T11:26:16+01:00
New Revision: 972353fdfad02ae3206c91c80dd4eaa997b3d499

URL: https://github.com/llvm/llvm-project/commit/972353fdfad02ae3206c91c80dd4eaa997b3d499
DIFF: https://github.com/llvm/llvm-project/commit/972353fdfad02ae3206c91c80dd4eaa997b3d499.diff

LOG: [LAA] Add tests where results can be improved using loop guards.

Added: 
    llvm/test/Analysis/LoopAccessAnalysis/no-dep-via-loop-guards.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/no-dep-via-loop-guards.ll b/llvm/test/Analysis/LoopAccessAnalysis/no-dep-via-loop-guards.ll
new file mode 100644
index 00000000000000..5ff8eb59cfbf19
--- /dev/null
+++ b/llvm/test/Analysis/LoopAccessAnalysis/no-dep-via-loop-guards.ll
@@ -0,0 +1,363 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='print<access-info>' -disable-output < %s  2>&1 | FileCheck %s
+
+; Loop guard for %off guarantees the accesses in the loop do not overlap.
+; TODO: currently missed by LAA
+define void @access_after_via_loop_guard(ptr %a, i64 %off) {
+; CHECK-LABEL: 'access_after_via_loop_guard'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.after, align 4 ->
+; CHECK-NEXT:            store i32 %add, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+  %c = icmp eq i64 %off, 100
+  br i1 %c, label %ph, label %exit
+
+ph:
+  %gep.after = getelementptr inbounds nuw i32, ptr %a, i64 %off
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+  %l = load i32, ptr %gep.after, align 4
+  %add = add i32 %l, %l
+  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %gep, align 4
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 100
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+; Loop guard for %off guarantees the accesses in the loop do not overlap.
+; TODO: currently missed by LAA
+define void @access_after_via_loop_guard_sge(ptr %a, i64 %off) {
+; CHECK-LABEL: 'access_after_via_loop_guard_sge'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.after, align 4 ->
+; CHECK-NEXT:            store i32 %add, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+  %c = icmp sge i64 %off, 100
+  br i1 %c, label %ph, label %exit
+
+ph:
+  %gep.after = getelementptr inbounds nuw i32, ptr %a, i64 %off
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+  %l = load i32, ptr %gep.after, align 4
+  %add = add i32 %l, %l
+  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %gep, align 4
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 100
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
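+; Here the guard %off == 99 means the load of %gep.after and the store in the
+; last iteration access the same location (%a + 99 * 4), so the reported
+; dependence is genuine.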
+define void @access_after_via_loop_guard_99(ptr %a, i64 %off) {
+; CHECK-LABEL: 'access_after_via_loop_guard_99'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.after, align 4 ->
+; CHECK-NEXT:            store i32 %add, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+  %c = icmp eq i64 %off, 99
+  br i1 %c, label %ph, label %exit
+
+ph:
+  %gep.after = getelementptr inbounds nuw i32, ptr %a, i64 %off
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+  %l = load i32, ptr %gep.after, align 4
+  %add = add i32 %l, %l
+  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %gep, align 4
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 100
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+; Loop guard for %off guarantees the accesses in the loop do not overlap.
+; TODO: currently missed by LAA
+define void @access_after_via_loop_guard_sge_99(ptr %a, i64 %off) {
+; CHECK-LABEL: 'access_after_via_loop_guard_sge_99'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.after, align 4 ->
+; CHECK-NEXT:            store i32 %add, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+  %c = icmp sge i64 %off, 99
+  br i1 %c, label %ph, label %exit
+
+ph:
+  %gep.after = getelementptr inbounds nuw i32, ptr %a, i64 %off
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+  %l = load i32, ptr %gep.after, align 4
+  %add = add i32 %l, %l
+  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %gep, align 4
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 100
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+define void @access_after_via_loop_guard_uge(ptr %a, i64 %off) {
+; CHECK-LABEL: 'access_after_via_loop_guard_uge'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.after, align 4 ->
+; CHECK-NEXT:            store i32 %add, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+  %c = icmp uge i64 %off, 100
+  br i1 %c, label %ph, label %exit
+
+ph:
+  %gep.after = getelementptr inbounds nuw i32, ptr %a, i64 %off
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+  %l = load i32, ptr %gep.after, align 4
+  %add = add i32 %l, %l
+  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %gep, align 4
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, 100
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+; Loop guard for %off guarantees the accesses in the loop do not overlap.
+; TODO: currently missed by LAA
+define void @access_after_via_loop_guard_eq_loop_cond(ptr %a, i64 %off) {
+; CHECK-LABEL: 'access_after_via_loop_guard_eq_loop_cond'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.after, align 4 ->
+; CHECK-NEXT:            store i32 %add, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+  %c = icmp eq i64 %off, 100
+  br i1 %c, label %ph, label %exit
+
+ph:
+  %gep.after = getelementptr inbounds nuw i32, ptr %a, i64 100
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+  %l = load i32, ptr %gep.after, align 4
+  %add = add i32 %l, %l
+  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %gep, align 4
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, %off
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
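+; With %off == 101 the loop runs 101 iterations, so the store in the last
+; iteration writes the element loaded via %gep.after (%a + 100 * 4); the
+; reported dependence is genuine.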
+define void @access_after_via_loop_guard_eq_loop_cond_100(ptr %a, i64 %off) {
+; CHECK-LABEL: 'access_after_via_loop_guard_eq_loop_cond_100'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.after, align 4 ->
+; CHECK-NEXT:            store i32 %add, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+  %c = icmp eq i64 %off, 101
+  br i1 %c, label %ph, label %exit
+
+ph:
+  %gep.after = getelementptr inbounds nuw i32, ptr %a, i64 100
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+  %l = load i32, ptr %gep.after, align 4
+  %add = add i32 %l, %l
+  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %gep, align 4
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, %off
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+; Loop guard for %off guarantees the accesses in the loop do not overlap.
+; TODO: currently missed by LAA
+define void @access_after_via_loop_guard_sge_loop_cond(ptr %a, i64 %off) {
+; CHECK-LABEL: 'access_after_via_loop_guard_sge_loop_cond'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.after, align 4 ->
+; CHECK-NEXT:            store i32 %add, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+  %c = icmp sge i64 %off, 100
+  br i1 %c, label %ph, label %exit
+
+ph:
+  %gep.after = getelementptr inbounds nuw i32, ptr %a, i64 100
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+  %l = load i32, ptr %gep.after, align 4
+  %add = add i32 %l, %l
+  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %gep, align 4
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, %off
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
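+; With %off >= 101 the loop runs at least 101 iterations, so the store
+; eventually writes the element loaded via %gep.after (%a + 100 * 4); the
+; reported dependence is genuine.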
+define void @access_after_via_loop_guard_sge_loop_cond_101(ptr %a, i64 %off) {
+; CHECK-LABEL: 'access_after_via_loop_guard_sge_loop_cond_101'
+; CHECK-NEXT:    loop:
+; CHECK-NEXT:      Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT:  Unknown data dependence.
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Unknown:
+; CHECK-NEXT:            %l = load i32, ptr %gep.after, align 4 ->
+; CHECK-NEXT:            store i32 %add, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+;
+  %c = icmp sge i64 %off, 101
+  br i1 %c, label %ph, label %exit
+
+ph:
+  %gep.after = getelementptr inbounds nuw i32, ptr %a, i64 100
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
+  %l = load i32, ptr %gep.after, align 4
+  %add = add i32 %l, %l
+  %gep = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %gep, align 4
+  %iv.next = add nsw nuw i64 %iv, 1
+  %ec = icmp eq i64 %iv.next, %off
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}

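For readers less used to the IR, the guarded pattern the positive tests exercise
corresponds roughly to the C sketch below (illustrative only; the function and
variable names are invented and not taken from the commit). The guard places the
loaded element past the last stored element, so the load and the stores can never
touch the same location; the TODO comments above mark the cases where LAA does not
yet exploit that guard.

    /* Rough C equivalent of access_after_via_loop_guard (a sketch, not part of
       the committed test). */
    void guarded_loop(int *a, long off) {
      if (off == 100) {                /* loop guard, checked before the loop   */
        for (long i = 0; i < 100; i++)
          a[i] = a[off] + a[off];      /* stores a[0..99], loads a[100]: disjoint */
      }
    }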
More information about the llvm-commits mailing list