[llvm] a94f081 - LAA: regen a test with UTC (NFC) (#122748)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 14 01:02:36 PST 2025


Author: Ramkumar Ramachandra
Date: 2025-01-14T09:02:33Z
New Revision: a94f08174c0312bca0ff6405640eb8a3ff986084

URL: https://github.com/llvm/llvm-project/commit/a94f08174c0312bca0ff6405640eb8a3ff986084
DIFF: https://github.com/llvm/llvm-project/commit/a94f08174c0312bca0ff6405640eb8a3ff986084.diff

LOG: LAA: regen a test with UTC (NFC) (#122748)

Added: 
    

Modified: 
    llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll b/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
index 1ebe91a044b783..71c20bc2b2a824 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll
@@ -1,6 +1,6 @@
-; RUN: opt -passes='print<access-info>' -aa-pipeline='basic-aa' -disable-output < %s  2>&1 | FileCheck %s --check-prefix=LAA
-
-target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='print<access-info>' -aa-pipeline='basic-aa' \
+; RUN:   -disable-output %s 2>&1 | FileCheck %s
 
 ; For this loop:
 ;   unsigned index = 0;
@@ -19,24 +19,33 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 ; to check that the pointers don't wrap since the GEPs are not
 ; inbound.
 
-; LAA-LABEL: f1
-; LAA: Memory dependences are safe{{$}}
-; LAA: SCEV assumptions:
-; LAA:      {0,+,2}<%for.body> Added Flags: <nusw>
-; LAA-NEXT: {%a,+,4}<%for.body> Added Flags: <nusw>
-
 ; The expression for %mul_ext as analyzed by SCEV is
 ;    (zext i32 {0,+,2}<%for.body> to i64)
 ; We have added the nusw flag to turn this expression into the SCEV expression:
 ;    i64 {0,+,2}<%for.body>
 
-; LAA: [PSE]  %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
-; LAA-NEXT: ((2 * (zext i32 {0,+,2}<%for.body> to i64))<nuw><nsw> + %a)
-; LAA-NEXT: --> {%a,+,4}<%for.body>
-
-
-define void @f1(ptr noalias %a,
-                ptr noalias %b, i64 %N) {
+define void @f1(ptr noalias %a, ptr noalias %b, i64 %N) {
+; CHECK-LABEL: 'f1'
+; CHECK-NEXT:    for.body:
+; CHECK-NEXT:      Memory dependences are safe
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Forward:
+; CHECK-NEXT:            %loadA = load i16, ptr %arrayidxA, align 2 ->
+; CHECK-NEXT:            store i16 %add, ptr %arrayidxA, align 2
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-NEXT:      {0,+,2}<%for.body> Added Flags: <nusw>
+; CHECK-NEXT:      {%a,+,4}<%for.body> Added Flags: <nusw>
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+; CHECK-NEXT:      [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
+; CHECK-NEXT:        ((2 * (zext i32 {0,+,2}<%for.body> to i64))<nuw><nsw> + %a)
+; CHECK-NEXT:        --> {%a,+,4}<%for.body>
+;
 entry:
   br label %for.body
 
@@ -86,23 +95,33 @@ for.end:                                          ; preds = %for.body
 ; This loop has a negative stride for A, and the nusw flag is required in
 ; order to properly extend the increment from i32 -4 to i64 -4.
 
-; LAA-LABEL: f2
-; LAA: Memory dependences are safe{{$}}
-; LAA: SCEV assumptions:
-; LAA-NEXT: {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nusw>
-; LAA-NEXT: {((4 * (zext i31 (trunc i64 %N to i31) to i64))<nuw><nsw> + %a),+,-4}<%for.body> Added Flags: <nusw>
-
 ; The expression for %mul_ext as analyzed by SCEV is
 ;     (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
 ; We have added the nusw flag to turn this expression into the following SCEV:
 ;     i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>
 
-; LAA: [PSE]  %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
-; LAA-NEXT: ((2 * (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nuw><nsw> + %a)
-; LAA-NEXT: --> {((4 * (zext i31 (trunc i64 %N to i31) to i64))<nuw><nsw> + %a),+,-4}<%for.body>
-
-define void @f2(ptr noalias %a,
-                ptr noalias %b, i64 %N) {
+define void @f2(ptr noalias %a, ptr noalias %b, i64 %N) {
+; CHECK-LABEL: 'f2'
+; CHECK-NEXT:    for.body:
+; CHECK-NEXT:      Memory dependences are safe
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Forward:
+; CHECK-NEXT:            %loadA = load i16, ptr %arrayidxA, align 2 ->
+; CHECK-NEXT:            store i16 %add, ptr %arrayidxA, align 2
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-NEXT:      {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nusw>
+; CHECK-NEXT:      {((4 * (zext i31 (trunc i64 %N to i31) to i64))<nuw><nsw> + %a),+,-4}<%for.body> Added Flags: <nusw>
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+; CHECK-NEXT:      [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
+; CHECK-NEXT:        ((2 * (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nuw><nsw> + %a)
+; CHECK-NEXT:        --> {((4 * (zext i31 (trunc i64 %N to i31) to i64))<nuw><nsw> + %a),+,-4}<%for.body>
+;
 entry:
   %TruncN = trunc i64 %N to i32
   br label %for.body
@@ -137,23 +156,33 @@ for.end:                                          ; preds = %for.body
 ; We replicate the tests above, but this time sign extend 2 * index instead
 ; of zero extending it.
 
-; LAA-LABEL: f3
-; LAA: Memory dependences are safe{{$}}
-; LAA: SCEV assumptions:
-; LAA-NEXT: {0,+,2}<%for.body> Added Flags: <nssw>
-; LAA-NEXT: {%a,+,4}<%for.body> Added Flags: <nusw>
-
 ; The expression for %mul_ext as analyzed by SCEV is
 ;     i64 (sext i32 {0,+,2}<%for.body> to i64)
 ; We have added the nssw flag to turn this expression into the following SCEV:
 ;     i64 {0,+,2}<%for.body>
 
-; LAA: [PSE]  %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
-; LAA-NEXT: ((2 * (sext i32 {0,+,2}<%for.body> to i64))<nsw> + %a)
-; LAA-NEXT: --> {%a,+,4}<%for.body>
-
-define void @f3(ptr noalias %a,
-                ptr noalias %b, i64 %N) {
+define void @f3(ptr noalias %a, ptr noalias %b, i64 %N) {
+; CHECK-LABEL: 'f3'
+; CHECK-NEXT:    for.body:
+; CHECK-NEXT:      Memory dependences are safe
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Forward:
+; CHECK-NEXT:            %loadA = load i16, ptr %arrayidxA, align 2 ->
+; CHECK-NEXT:            store i16 %add, ptr %arrayidxA, align 2
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-NEXT:      {0,+,2}<%for.body> Added Flags: <nssw>
+; CHECK-NEXT:      {%a,+,4}<%for.body> Added Flags: <nusw>
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+; CHECK-NEXT:      [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
+; CHECK-NEXT:        ((2 * (sext i32 {0,+,2}<%for.body> to i64))<nsw> + %a)
+; CHECK-NEXT:        --> {%a,+,4}<%for.body>
+;
 entry:
   br label %for.body
 
@@ -184,23 +213,33 @@ for.end:                                          ; preds = %for.body
   ret void
 }
 
-; LAA-LABEL: f4
-; LAA: Memory dependences are safe{{$}}
-; LAA: SCEV assumptions:
-; LAA-NEXT: {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nssw>
-; LAA-NEXT: {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64))<nsw> + %a),+,-4}<%for.body> Added Flags: <nusw>
-
 ; The expression for %mul_ext as analyzed by SCEV is
 ;     i64  (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
 ; We have added the nssw flag to turn this expression into the following SCEV:
 ;     i64 {sext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>
 
-; LAA: [PSE]  %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
-; LAA-NEXT: ((2 * (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nsw> + %a)
-; LAA-NEXT: --> {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64))<nsw> + %a),+,-4}<%for.body>
-
-define void @f4(ptr noalias %a,
-                ptr noalias %b, i64 %N) {
+define void @f4(ptr noalias %a, ptr noalias %b, i64 %N) {
+; CHECK-LABEL: 'f4'
+; CHECK-NEXT:    for.body:
+; CHECK-NEXT:      Memory dependences are safe
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Forward:
+; CHECK-NEXT:            %loadA = load i16, ptr %arrayidxA, align 2 ->
+; CHECK-NEXT:            store i16 %add, ptr %arrayidxA, align 2
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-NEXT:      {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nssw>
+; CHECK-NEXT:      {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64))<nsw> + %a),+,-4}<%for.body> Added Flags: <nusw>
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+; CHECK-NEXT:      [PSE] %arrayidxA = getelementptr i16, ptr %a, i64 %mul_ext:
+; CHECK-NEXT:        ((2 * (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nsw> + %a)
+; CHECK-NEXT:        --> {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64))<nsw> + %a),+,-4}<%for.body>
+;
 entry:
   %TruncN = trunc i64 %N to i32
   br label %for.body
@@ -239,18 +278,27 @@ for.end:                                          ; preds = %for.body
 ;
 ; We can still analyze this by adding the required no wrap SCEV predicates.
 
-; LAA-LABEL: f5
-; LAA: Memory dependences are safe{{$}}
-; LAA: SCEV assumptions:
-; LAA-NEXT: {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nssw>
-; LAA-EMPTY:
-
-; LAA: [PSE]  %arrayidxA = getelementptr inbounds i16, ptr %a, i32 %mul:
-; LAA-NEXT: ((2 * (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nsw> + %a)
-; LAA-NEXT: --> {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64))<nsw> + %a),+,-4}<%for.body>
-
-define void @f5(ptr noalias %a,
-                ptr noalias %b, i64 %N) {
+define void @f5(ptr noalias %a, ptr noalias %b, i64 %N) {
+; CHECK-LABEL: 'f5'
+; CHECK-NEXT:    for.body:
+; CHECK-NEXT:      Memory dependences are safe
+; CHECK-NEXT:      Dependences:
+; CHECK-NEXT:        Forward:
+; CHECK-NEXT:            %loadA = load i16, ptr %arrayidxA, align 2 ->
+; CHECK-NEXT:            store i16 %add, ptr %arrayidxA, align 2
+; CHECK-EMPTY:
+; CHECK-NEXT:      Run-time memory checks:
+; CHECK-NEXT:      Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT:      Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT:      SCEV assumptions:
+; CHECK-NEXT:      {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> Added Flags: <nssw>
+; CHECK-EMPTY:
+; CHECK-NEXT:      Expressions re-written:
+; CHECK-NEXT:      [PSE] %arrayidxA = getelementptr inbounds i16, ptr %a, i32 %mul:
+; CHECK-NEXT:        ((2 * (sext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64))<nsw> + %a)
+; CHECK-NEXT:        --> {((2 * (sext i32 (2 * (trunc i64 %N to i32)) to i64))<nsw> + %a),+,-4}<%for.body>
+;
 entry:
   %TruncN = trunc i64 %N to i32
   br label %for.body

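For reference, checks in this form are produced by the script named in the NOTE line of the updated test; a plausible regeneration command (the working directory and the way opt is located are assumptions, not taken from the commit) would be:

    # hypothetical invocation, run from an llvm-project checkout with opt on PATH
    llvm/utils/update_analyze_test_checks.py --version 5 \
        llvm/test/Analysis/LoopAccessAnalysis/wrapping-pointer-versioning.ll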
