[llvm] 7f5b15a - [LSR] Move normalization check to normalizeForPostIncUse.

Florian Hahn via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jul 4 03:57:20 PDT 2023


Author: Florian Hahn
Date: 2023-07-04T11:56:51+01:00
New Revision: 7f5b15ad150e59815fbd4adc3202c8720718896e

URL: https://github.com/llvm/llvm-project/commit/7f5b15ad150e59815fbd4adc3202c8720718896e
DIFF: https://github.com/llvm/llvm-project/commit/7f5b15ad150e59815fbd4adc3202c8720718896e.diff

LOG: [LSR] Move normalization check to normalizeForPostIncUse.

Move the logic added in 3a57152d85e1 to normalizeForPostIncUse to catch
additional non-invertible cases. This fixes another miscompile pointed
out by @peixin in D153004.
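
For context, the check that now lives in normalizeForPostIncUse is a
round-trip test: the expression is normalized, the result is denormalized
again, and the normalization is only accepted if that reproduces the
original SCEV. A minimal sketch of the new logic, mirroring the
ScalarEvolutionNormalization.cpp hunk below:

    const SCEV *Normalized =
        NormalizeDenormalizeRewriter(Normalize, Pred, SE).visit(S);
    const SCEV *Denormalized = denormalizeForPostIncUse(Normalized, Loops, SE);
    // The normalization is only usable if it can be inverted back to the
    // original expression; otherwise signal failure to the caller.
    if (Denormalized != S)
      return nullptr;
    return Normalized;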

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
    llvm/lib/Analysis/IVUsers.cpp
    llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
    llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
    llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h b/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
index da420ff1e6d2d9..c82ae4c8032373 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
@@ -50,7 +50,7 @@ typedef SmallPtrSet<const Loop *, 2> PostIncLoopSet;
 typedef function_ref<bool(const SCEVAddRecExpr *)> NormalizePredTy;
 
 /// Normalize \p S to be post-increment for all loops present in \p
-/// Loops.
+/// Loops. Returns nullptr if the result is not invertible.
 const SCEV *normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
                                    ScalarEvolution &SE);
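
With the updated contract, callers of normalizeForPostIncUse have to handle
a null result. A hypothetical caller-side sketch (the actual handling in
LoopStrengthReduce.cpp appears further below, where the fixup loop uses
`continue`):

    const SCEV *N = normalizeForPostIncUse(S, Loops, SE);
    if (!N)
      return nullptr; // S cannot be normalized invertibly for these loops.
    // ... otherwise N is safe to use and denormalizes back to S ...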
 

diff --git a/llvm/lib/Analysis/IVUsers.cpp b/llvm/lib/Analysis/IVUsers.cpp
index 665d4eacaf5ca4..af54de09b0a2a9 100644
--- a/llvm/lib/Analysis/IVUsers.cpp
+++ b/llvm/lib/Analysis/IVUsers.cpp
@@ -335,14 +335,7 @@ const SCEV *IVUsers::getReplacementExpr(const IVStrideUse &IU) const {
 /// getExpr - Return the expression for the use.
 const SCEV *IVUsers::getExpr(const IVStrideUse &IU) const {
   const SCEV *Replacement = getReplacementExpr(IU);
-  const SCEV *Normalized =
-      normalizeForPostIncUse(Replacement, IU.getPostIncLoops(), *SE);
-  const SCEV *Denormalized =
-      denormalizeForPostIncUse(Normalized, IU.getPostIncLoops(), *SE);
-  // If the normalized expression isn't invertible.
-  if (Denormalized != Replacement)
-    return nullptr;
-  return Normalized;
+  return normalizeForPostIncUse(Replacement, IU.getPostIncLoops(), *SE);
 }
 
 static const SCEVAddRecExpr *findAddRecForLoop(const SCEV *S, const Loop *L) {

diff --git a/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp b/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
index bc0fbffee61b09..198c716249a182 100644
--- a/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -102,7 +102,13 @@ const SCEV *llvm::normalizeForPostIncUse(const SCEV *S,
   auto Pred = [&](const SCEVAddRecExpr *AR) {
     return Loops.count(AR->getLoop());
   };
-  return NormalizeDenormalizeRewriter(Normalize, Pred, SE).visit(S);
+  const SCEV *Normalized =
+      NormalizeDenormalizeRewriter(Normalize, Pred, SE).visit(S);
+  const SCEV *Denormalized = denormalizeForPostIncUse(Normalized, Loops, SE);
+  // If the normalized expression isn't invertible.
+  if (Denormalized != S)
+    return nullptr;
+  return Normalized;
 }
 
 const SCEV *llvm::normalizeForPostIncUseIf(const SCEV *S, NormalizePredTy Pred,

diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index b5854211e811cf..905f84efcae58e 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -3361,6 +3361,8 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
           // S is normalized, so normalize N before folding it into S
           // to keep the result normalized.
           N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
+          if (!N)
+            continue;
           Kind = LSRUse::ICmpZero;
           S = SE.getMinusSCEV(N, S);
         } else if (L->isLoopInvariant(NV) &&
@@ -3375,6 +3377,8 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
           // SCEV can't compute the difference of two unknown pointers.
           N = SE.getUnknown(NV);
           N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
+          if (!N)
+            continue;
           Kind = LSRUse::ICmpZero;
           S = SE.getMinusSCEV(N, S);
           assert(!isa<SCEVCouldNotCompute>(S));
@@ -4160,7 +4164,7 @@ getAnyExtendConsideringPostIncUses(ArrayRef<PostIncLoopSet> Loops,
     auto *DenormExpr = denormalizeForPostIncUse(Expr, L, SE);
     const SCEV *NewDenormExpr = SE.getAnyExtendExpr(DenormExpr, ToTy);
     const SCEV *New = normalizeForPostIncUse(NewDenormExpr, L, SE);
-    if (Result && New != Result)
+    if (!New || (Result && New != Result))
       return nullptr;
     Result = New;
   }

diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll
index 0d499efa8570d0..95b064bd044fa4 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll
@@ -142,6 +142,7 @@ define i64 @test_normalization_failure_in_any_extend(ptr %i, i64 %i1, i8 %i25) {
 ; CHECK:       loop.1.header:
 ; CHECK-NEXT:    [[IV_1:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_1_NEXT:%.*]], [[LOOP_1_LATCH:%.*]] ]
 ; CHECK-NEXT:    [[IV_2:%.*]] = phi i64 [ [[I1]], [[ENTRY]] ], [ [[TMP1:%.*]], [[LOOP_1_LATCH]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[IV_2]], 2
 ; CHECK-NEXT:    br label [[LOOP_2:%.*]]
 ; CHECK:       loop.2:
 ; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i32 [ [[LSR_IV_NEXT:%.*]], [[LOOP_2]] ], [ 2, [[LOOP_1_HEADER]] ]
@@ -149,33 +150,34 @@ define i64 @test_normalization_failure_in_any_extend(ptr %i, i64 %i1, i8 %i25) {
 ; CHECK-NEXT:    [[C_1:%.*]] = icmp sgt i32 [[LSR_IV_NEXT]], 0
 ; CHECK-NEXT:    br i1 [[C_1]], label [[LOOP_2]], label [[LOOP_3_PREHEADER:%.*]]
 ; CHECK:       loop.3.preheader:
-; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[IV_2]], 1
 ; CHECK-NEXT:    br label [[LOOP_3:%.*]]
 ; CHECK:       loop.3:
-; CHECK-NEXT:    [[LSR_IV5:%.*]] = phi i64 [ [[TMP0]], [[LOOP_3_PREHEADER]] ], [ [[LSR_IV_NEXT6:%.*]], [[LOOP_3]] ]
+; CHECK-NEXT:    [[LSR_IV5:%.*]] = phi i64 [ 0, [[LOOP_3_PREHEADER]] ], [ [[LSR_IV_NEXT6:%.*]], [[LOOP_3]] ]
 ; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi i64 [ 2, [[LOOP_3_PREHEADER]] ], [ [[LSR_IV_NEXT2:%.*]], [[LOOP_3]] ]
 ; CHECK-NEXT:    [[IV_5:%.*]] = phi i32 [ [[IV_5_NEXT:%.*]], [[LOOP_3]] ], [ 1, [[LOOP_3_PREHEADER]] ]
 ; CHECK-NEXT:    [[IV_5_NEXT]] = add nsw i32 [[IV_5]], -1
 ; CHECK-NEXT:    [[LSR:%.*]] = trunc i32 [[IV_5_NEXT]] to i8
 ; CHECK-NEXT:    [[LSR_IV_NEXT2]] = add nsw i64 [[LSR_IV1]], -1
 ; CHECK-NEXT:    [[TMP:%.*]] = trunc i64 [[LSR_IV_NEXT2]] to i32
-; CHECK-NEXT:    [[LSR_IV_NEXT6]] = add i64 [[LSR_IV5]], 1
+; CHECK-NEXT:    [[LSR_IV_NEXT6]] = add nsw i64 [[LSR_IV5]], -1
 ; CHECK-NEXT:    [[C_2:%.*]] = icmp sgt i32 [[TMP]], 0
 ; CHECK-NEXT:    br i1 [[C_2]], label [[LOOP_3]], label [[LOOP_1_LATCH]]
 ; CHECK:       loop.1.latch:
 ; CHECK-NEXT:    [[IV_1_NEXT]] = add nuw nsw i32 [[IV_1]], 1
-; CHECK-NEXT:    [[TMP1]] = add i64 [[LSR_IV_NEXT6]], 1
+; CHECK-NEXT:    [[TMP1]] = sub i64 [[TMP0]], [[LSR_IV_NEXT6]]
 ; CHECK-NEXT:    [[C_3:%.*]] = icmp eq i32 [[IV_1_NEXT]], 8
 ; CHECK-NEXT:    br i1 [[C_3]], label [[EXIT:%.*]], label [[LOOP_1_HEADER]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    call void @use.i32(i32 [[IV_5_NEXT]])
-; CHECK-NEXT:    call void @use(i64 [[LSR_IV_NEXT6]])
+; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[IV_2]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[TMP2]], [[LSR_IV_NEXT6]]
+; CHECK-NEXT:    call void @use(i64 [[TMP3]])
 ; CHECK-NEXT:    call void @use(i64 [[LSR_IV_NEXT2]])
-; CHECK-NEXT:    [[TMP2:%.*]] = udiv i32 [[IV_5_NEXT]], 53
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
-; CHECK-NEXT:    [[TMP4:%.*]] = mul i8 [[TMP3]], 53
-; CHECK-NEXT:    [[TMP5:%.*]] = sub i8 [[LSR]], [[TMP4]]
-; CHECK-NEXT:    call void @use.i8(i8 [[TMP5]])
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i32 [[IV_5_NEXT]], 53
+; CHECK-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
+; CHECK-NEXT:    [[TMP6:%.*]] = mul i8 [[TMP5]], 53
+; CHECK-NEXT:    [[TMP7:%.*]] = sub i8 [[LSR]], [[TMP6]]
+; CHECK-NEXT:    call void @use.i8(i8 [[TMP7]])
 ; CHECK-NEXT:    [[I26:%.*]] = xor i8 [[I25]], 5
 ; CHECK-NEXT:    [[I27:%.*]] = zext i8 [[I26]] to i64
 ; CHECK-NEXT:    ret i64 [[I27]]


        

