[llvm] LAA: be less conservative in isNoWrap (PR #112553)
Ramkumar Ramachandra via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 17 01:12:50 PDT 2024
https://github.com/artagnon updated https://github.com/llvm/llvm-project/pull/112553
From 6c6c13a1652102eddd6ad201a683b2933963ae60 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Wed, 16 Oct 2024 14:11:02 +0100
Subject: [PATCH] LAA: be less conservative in isNoWrap
isNoWrap has exactly one caller, which handles Assume = true separately,
but too conservatively. Instead, pass Assume down to isNoWrap so that it
is threaded into getPtrStride, which already handles the Assume flag
correctly. Note also that the Stride == 1 check in isNoWrap is too
restrictive: getPtrStride returns a stride of 1 or -1, except when
isNoWrapAddRec or Assume is true; we can accept a stride of -1 as well,
plus the cases where isNoWrapAddRec is true. With this change,
getPtrStride called with Assume = true may return a non-unit stride,
and that case is now handled correctly as well.
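
For illustration, a minimal standalone sketch of the decision change
(MaybeStride, isNoWrapOld, and isNoWrapNew are hypothetical stand-ins,
not LLVM API; only the std::optional logic mirrors the patch):

  #include <cassert>
  #include <cstdint>
  #include <optional>

  // Stand-in for getPtrStride's result: nullopt when no safe stride
  // could be proven, otherwise the proven stride.
  using MaybeStride = std::optional<int64_t>;

  // Old logic: only a stride of exactly 1 (or an already-established
  // NUSW predicate) counted as no-wrap.
  static bool isNoWrapOld(MaybeStride Stride, bool HasNUSWPredicate) {
    return Stride.value_or(0) == 1 || HasNUSWPredicate;
  }

  // New logic: any stride getPtrStride vouches for counts, including
  // -1 and the non-unit strides it can return under Assume or
  // isNoWrapAddRec.
  static bool isNoWrapNew(MaybeStride Stride, bool HasNUSWPredicate) {
    return Stride.has_value() || HasNUSWPredicate;
  }

  int main() {
    // A reverse (-1) stride was rejected before but is accepted now.
    assert(!isNoWrapOld(MaybeStride{-1}, /*HasNUSWPredicate=*/false));
    assert(isNoWrapNew(MaybeStride{-1}, /*HasNUSWPredicate=*/false));
    return 0;
  }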
---
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 15 ++----
.../offset-range-known-via-assume.ll | 1 -
.../LoopVectorize/RISCV/induction-costs.ll | 48 ++-----------------
...wrapping-pointer-non-integral-addrspace.ll | 27 ++---------
4 files changed, 13 insertions(+), 78 deletions(-)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index d35bf6818d4379..0d2ee6fb674be6 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -825,14 +825,13 @@ static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
- const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
- Loop *L) {
+ const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr,
+ Type *AccessTy, Loop *L, bool Assume) {
const SCEV *PtrScev = PSE.getSCEV(Ptr);
if (PSE.getSE()->isLoopInvariant(PtrScev, L))
return true;
- int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
- return Stride == 1 ||
+ return getPtrStride(PSE, AccessTy, Ptr, L, Strides, Assume).has_value() ||
PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
}
@@ -1079,12 +1078,8 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
if (TranslatedPtrs.size() > 1)
return false;
- if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
- const SCEV *Expr = PSE.getSCEV(Ptr);
- if (!Assume || !isa<SCEVAddRecExpr>(Expr))
- return false;
- PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
- }
+ if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop, Assume))
+ return false;
}
// If there's only one option for Ptr, look it up after bounds and wrap
// checking, because assumptions might have been added to PSE.
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/offset-range-known-via-assume.ll b/llvm/test/Analysis/LoopAccessAnalysis/offset-range-known-via-assume.ll
index c358b00dad22a6..202f975190209a 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/offset-range-known-via-assume.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/offset-range-known-via-assume.ll
@@ -69,7 +69,6 @@ define void @offset_i32_known_positive_via_assume_forward_dep_1(ptr %A, i64 %off
; CHECK-EMPTY:
; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
; CHECK-NEXT: SCEV assumptions:
-; CHECK-NEXT: {((4 * %offset)<nsw> + %A),+,4}<nw><%loop> Added Flags: <nusw>
; CHECK-EMPTY:
; CHECK-NEXT: Expressions re-written:
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
index 09933ad4255ed6..49e9abcd9f9192 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll
@@ -22,47 +22,9 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 1
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 8
-; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umax.i64(i64 288, i64 [[TMP7]])
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.umax.i64(i64 128, i64 [[TMP7]])
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP5]], [[TMP8]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
-; CHECK: [[VECTOR_SCEVCHECK]]:
-; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[X_I64]], i64 99)
-; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[SMAX]], [[X_I64]]
-; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP9]], i64 1)
-; CHECK-NEXT: [[TMP10:%.*]] = sub i64 [[SMAX]], [[UMIN]]
-; CHECK-NEXT: [[TMP11:%.*]] = sub i64 [[TMP10]], [[X_I64]]
-; CHECK-NEXT: [[TMP12:%.*]] = udiv i64 [[TMP11]], 3
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[UMIN]], [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = shl nsw i64 [[X_I64]], 1
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP14]]
-; CHECK-NEXT: [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 6, i64 [[TMP13]])
-; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
-; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
-; CHECK-NEXT: [[TMP15:%.*]] = sub i64 0, [[MUL_RESULT]]
-; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[SCEVGEP]], i64 [[MUL_RESULT]]
-; CHECK-NEXT: [[TMP17:%.*]] = icmp ult ptr [[TMP16]], [[SCEVGEP]]
-; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP17]], [[MUL_OVERFLOW]]
-; CHECK-NEXT: [[TMP19:%.*]] = shl nsw i64 [[X_I64]], 3
-; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP19]]
-; CHECK-NEXT: [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 24, i64 [[TMP13]])
-; CHECK-NEXT: [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
-; CHECK-NEXT: [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
-; CHECK-NEXT: [[TMP20:%.*]] = sub i64 0, [[MUL_RESULT3]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[SCEVGEP1]], i64 [[MUL_RESULT3]]
-; CHECK-NEXT: [[TMP22:%.*]] = icmp ult ptr [[TMP21]], [[SCEVGEP1]]
-; CHECK-NEXT: [[TMP23:%.*]] = or i1 [[TMP22]], [[MUL_OVERFLOW4]]
-; CHECK-NEXT: [[TMP24:%.*]] = add nsw i64 [[TMP19]], -8
-; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP24]]
-; CHECK-NEXT: [[MUL6:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 24, i64 [[TMP13]])
-; CHECK-NEXT: [[MUL_RESULT7:%.*]] = extractvalue { i64, i1 } [[MUL6]], 0
-; CHECK-NEXT: [[MUL_OVERFLOW8:%.*]] = extractvalue { i64, i1 } [[MUL6]], 1
-; CHECK-NEXT: [[TMP25:%.*]] = sub i64 0, [[MUL_RESULT7]]
-; CHECK-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[SCEVGEP5]], i64 [[MUL_RESULT7]]
-; CHECK-NEXT: [[TMP27:%.*]] = icmp ult ptr [[TMP26]], [[SCEVGEP5]]
-; CHECK-NEXT: [[TMP28:%.*]] = or i1 [[TMP27]], [[MUL_OVERFLOW8]]
-; CHECK-NEXT: [[TMP29:%.*]] = or i1 [[TMP18]], [[TMP23]]
-; CHECK-NEXT: [[TMP30:%.*]] = or i1 [[TMP29]], [[TMP28]]
-; CHECK-NEXT: br i1 [[TMP30]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
; CHECK-NEXT: [[TMP31:%.*]] = shl nsw i64 [[X_I64]], 1
; CHECK-NEXT: [[SCEVGEP9:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP31]]
@@ -130,12 +92,12 @@ define void @skip_free_iv_truncate(i16 %x, ptr %A) #0 {
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_SCEVCHECK]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL23:%.*]] = phi i32 [ [[IND_END22]], %[[MIDDLE_BLOCK]] ], [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_SCEVCHECK]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[X_I64]], %[[ENTRY]] ], [ [[X_I64]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL12:%.*]] = phi i32 [ [[IND_END22]], %[[MIDDLE_BLOCK]] ], [ [[X_I32]], %[[ENTRY]] ], [ [[X_I32]], %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[IV_CONV:%.*]] = phi i32 [ [[BC_RESUME_VAL23]], %[[SCALAR_PH]] ], [ [[TMP64:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_CONV:%.*]] = phi i32 [ [[BC_RESUME_VAL12]], %[[SCALAR_PH]] ], [ [[TMP64:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_I64:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP61:%.*]] = load i64, ptr [[GEP_I64]], align 8
; CHECK-NEXT: [[TMP62:%.*]] = sext i32 [[IV_CONV]] to i64
diff --git a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-non-integral-addrspace.ll b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-non-integral-addrspace.ll
index 5abdde9e0564e5..e3cb3831d7938e 100644
--- a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-non-integral-addrspace.ll
+++ b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-non-integral-addrspace.ll
@@ -37,28 +37,7 @@ define void @wrapping_ptr_nonint_addrspace(ptr %arg) {
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr addrspace(13) [[SCEVGEP]], [[LOAD3]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr addrspace(13) [[SCEVGEP2]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; CHECK-NEXT: [[TMP5:%.*]] = sub i64 0, [[CALL]]
-; CHECK-NEXT: [[TMP6:%.*]] = shl nsw i64 [[SEXT]], 2
-; CHECK-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP6]], -4
-; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr addrspace(13) [[LOAD3]], i64 [[TMP7]]
-; CHECK-NEXT: [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP5]])
-; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
-; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
-; CHECK-NEXT: [[TMP8:%.*]] = sub i64 0, [[MUL_RESULT]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr addrspace(13) [[SCEVGEP3]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt ptr addrspace(13) [[TMP9]], [[SCEVGEP3]]
-; CHECK-NEXT: [[TMP11:%.*]] = or i1 [[TMP10]], [[MUL_OVERFLOW]]
-; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr addrspace(13) [[LOAD3]], i64 -4
-; CHECK-NEXT: [[MUL5:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP5]])
-; CHECK-NEXT: [[MUL_RESULT6:%.*]] = extractvalue { i64, i1 } [[MUL5]], 0
-; CHECK-NEXT: [[MUL_OVERFLOW7:%.*]] = extractvalue { i64, i1 } [[MUL5]], 1
-; CHECK-NEXT: [[TMP12:%.*]] = sub i64 0, [[MUL_RESULT6]]
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr addrspace(13) [[SCEVGEP4]], i64 [[TMP12]]
-; CHECK-NEXT: [[TMP14:%.*]] = icmp ugt ptr addrspace(13) [[TMP13]], [[SCEVGEP4]]
-; CHECK-NEXT: [[TMP15:%.*]] = or i1 [[TMP14]], [[MUL_OVERFLOW7]]
-; CHECK-NEXT: [[TMP16:%.*]] = or i1 [[TMP11]], [[TMP15]]
-; CHECK-NEXT: [[LVER_SAFE:%.*]] = or i1 [[FOUND_CONFLICT]], [[TMP16]]
-; CHECK-NEXT: br i1 [[LVER_SAFE]], label %[[LOOP_PH_LVER_ORIG:.*]], label %[[LOOP_PH:.*]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[LOOP_PH_LVER_ORIG:.*]], label %[[LOOP_PH:.*]]
; CHECK: [[LOOP_PH_LVER_ORIG]]:
; CHECK-NEXT: br label %[[LOOP_LVER_ORIG:.*]]
; CHECK: [[LOOP_LVER_ORIG]]:
@@ -82,10 +61,10 @@ define void @wrapping_ptr_nonint_addrspace(ptr %arg) {
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr addrspace(13) [[LOAD3]], i64 [[ADD1]]
; CHECK-NEXT: store i32 [[LOAD4]], ptr addrspace(13) [[GEP1]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[VALUE_PHI3]], [[CALL]]
-; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT_LOOPEXIT8:.*]], label %[[LOOP]]
+; CHECK-NEXT: br i1 [[CMP]], label %[[EXIT_LOOPEXIT3:.*]], label %[[LOOP]]
; CHECK: [[EXIT_LOOPEXIT]]:
; CHECK-NEXT: br label %[[EXIT:.*]]
-; CHECK: [[EXIT_LOOPEXIT8]]:
+; CHECK: [[EXIT_LOOPEXIT3]]:
; CHECK-NEXT: br label %[[EXIT]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void