[llvm] dae5cd7 - Recommit "[LSR] Consider post-inc form when creating extends/truncates."

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 19 09:57:48 PDT 2023


Author: Florian Hahn
Date: 2023-06-19T17:57:06+01:00
New Revision: dae5cd73cb38b7176f10ffa527ae4d083fdc0efe

URL: https://github.com/llvm/llvm-project/commit/dae5cd73cb38b7176f10ffa527ae4d083fdc0efe
DIFF: https://github.com/llvm/llvm-project/commit/dae5cd73cb38b7176f10ffa527ae4d083fdc0efe.diff

LOG: Recommit "[LSR] Consider post-inc form when creating extends/truncates."

This reverts the revert commit 1797ab36efc9c90c921cd725831f8c3f6a7125a2.

The recommitted version now checks the PostIncLoopSets for all fixups
and returns nullptr if the result doesn't match for all fixups.

Added: 
    

Modified: 
    llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memmove.ll
    llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index f82f4a4ec55f0..a863eab7c604c 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -4138,6 +4138,29 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
   }
 }
 
+/// Extend/Truncate \p Expr to \p ToTy considering post-inc uses in \p Loops.
+/// For all PostIncLoopSets in \p Loops, first de-normalize \p Expr, then
+/// perform the extension/truncate and normalize again, as the normalized form
+/// can result in folds that are not valid in the post-inc use contexts. The
+/// expressions for all PostIncLoopSets must match, otherwise return nullptr.
+static const SCEV *
+getAnyExtendConsideringPostIncUses(ArrayRef<PostIncLoopSet> Loops,
+                                   const SCEV *Expr, Type *ToTy,
+                                   ScalarEvolution &SE) {
+  const SCEV *Result = nullptr;
+  for (auto &L : Loops) {
+    auto *DenormExpr = denormalizeForPostIncUse(Expr, L, SE);
+    const SCEV *NewDenormExpr = SE.getAnyExtendExpr(DenormExpr, ToTy);
+    const SCEV *New = normalizeForPostIncUse(NewDenormExpr, L, SE);
+    if (Result && New != Result)
+      return nullptr;
+    Result = New;
+  }
+
+  assert(Result && "failed to create expression");
+  return Result;
+}
+
 /// Generate reuse formulae from different IV types.
 void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
   // Don't bother truncating symbolic values.
@@ -4157,6 +4180,10 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
              [](const SCEV *S) { return S->getType()->isPointerTy(); }))
     return;
 
+  SmallVector<PostIncLoopSet> Loops;
+  for (auto &LF : LU.Fixups)
+    Loops.push_back(LF.PostIncLoops);
+
   for (Type *SrcTy : Types) {
     if (SrcTy != DstTy && TTI.isTruncateFree(SrcTy, DstTy)) {
       Formula F = Base;
@@ -4166,15 +4193,17 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
       // initial node (maybe due to depth limitations), but it can do them while
       // taking ext.
       if (F.ScaledReg) {
-        const SCEV *NewScaledReg = SE.getAnyExtendExpr(F.ScaledReg, SrcTy);
-        if (NewScaledReg->isZero())
-         continue;
+        const SCEV *NewScaledReg =
+            getAnyExtendConsideringPostIncUses(Loops, F.ScaledReg, SrcTy, SE);
+        if (!NewScaledReg || NewScaledReg->isZero())
+          continue;
         F.ScaledReg = NewScaledReg;
       }
       bool HasZeroBaseReg = false;
       for (const SCEV *&BaseReg : F.BaseRegs) {
-        const SCEV *NewBaseReg = SE.getAnyExtendExpr(BaseReg, SrcTy);
-        if (NewBaseReg->isZero()) {
+        const SCEV *NewBaseReg =
+            getAnyExtendConsideringPostIncUses(Loops, BaseReg, SrcTy, SE);
+        if (!NewBaseReg || NewBaseReg->isZero()) {
           HasZeroBaseReg = true;
           break;
         }

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memmove.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memmove.ll
index 466147cac3439..0ec4f64b38a1b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memmove.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memmove.ll
@@ -31,29 +31,31 @@ define amdgpu_cs void @memmove_p1i8(ptr addrspace(1) %dst, ptr addrspace(1) %src
 ; LOOP-NEXT:    s_waitcnt vmcnt(0)
 ; LOOP-NEXT:    buffer_store_byte v8, v[6:7], s[0:3], 0 addr64
 ; LOOP-NEXT:    s_cbranch_vccnz .LBB0_2
-; LOOP-NEXT:  .LBB0_3: ; %Flow14
+; LOOP-NEXT:  .LBB0_3: ; %Flow17
 ; LOOP-NEXT:    s_andn2_saveexec_b64 s[0:1], s[4:5]
 ; LOOP-NEXT:    s_cbranch_execz .LBB0_6
 ; LOOP-NEXT:  ; %bb.4: ; %copy_backwards
-; LOOP-NEXT:    s_mov_b64 s[4:5], 3
-; LOOP-NEXT:    s_mov_b32 s2, 0
-; LOOP-NEXT:    s_mov_b32 s3, 0xf000
-; LOOP-NEXT:    s_mov_b64 s[0:1], 0
-; LOOP-NEXT:    v_mov_b32_e32 v4, s4
-; LOOP-NEXT:    v_mov_b32_e32 v5, s5
+; LOOP-NEXT:    s_mov_b32 s0, -4
+; LOOP-NEXT:    v_add_i32_e32 v0, vcc, 3, v0
+; LOOP-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; LOOP-NEXT:    v_add_i32_e32 v2, vcc, 3, v2
+; LOOP-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; LOOP-NEXT:    s_mov_b32 s6, 0
+; LOOP-NEXT:    s_mov_b32 s7, 0xf000
+; LOOP-NEXT:    s_mov_b64 s[4:5], 0
+; LOOP-NEXT:    v_mov_b32_e32 v4, s0
 ; LOOP-NEXT:  .LBB0_5: ; %copy_backwards_loop
 ; LOOP-NEXT:    ; =>This Inner Loop Header: Depth=1
-; LOOP-NEXT:    v_add_i32_e32 v6, vcc, v2, v4
-; LOOP-NEXT:    v_addc_u32_e32 v7, vcc, v3, v5, vcc
 ; LOOP-NEXT:    s_waitcnt expcnt(0)
-; LOOP-NEXT:    buffer_load_ubyte v8, v[6:7], s[0:3], 0 addr64
-; LOOP-NEXT:    v_add_i32_e32 v6, vcc, v0, v4
-; LOOP-NEXT:    v_addc_u32_e32 v7, vcc, v1, v5, vcc
-; LOOP-NEXT:    v_add_i32_e32 v4, vcc, -1, v4
-; LOOP-NEXT:    v_addc_u32_e32 v5, vcc, -1, v5, vcc
-; LOOP-NEXT:    v_cmp_eq_u32_e32 vcc, -1, v4
+; LOOP-NEXT:    buffer_load_ubyte v5, v[2:3], s[4:7], 0 addr64
+; LOOP-NEXT:    v_add_i32_e32 v4, vcc, 1, v4
+; LOOP-NEXT:    s_and_b64 vcc, vcc, exec
 ; LOOP-NEXT:    s_waitcnt vmcnt(0)
-; LOOP-NEXT:    buffer_store_byte v8, v[6:7], s[0:3], 0 addr64
+; LOOP-NEXT:    buffer_store_byte v5, v[0:1], s[4:7], 0 addr64
+; LOOP-NEXT:    v_add_i32_e64 v0, s[0:1], -1, v0
+; LOOP-NEXT:    v_addc_u32_e64 v1, s[0:1], -1, v1, s[0:1]
+; LOOP-NEXT:    v_add_i32_e64 v2, s[0:1], -1, v2
+; LOOP-NEXT:    v_addc_u32_e64 v3, s[0:1], -1, v3, s[0:1]
 ; LOOP-NEXT:    s_cbranch_vccz .LBB0_5
 ; LOOP-NEXT:  .LBB0_6: ; %memmove_done
 ; LOOP-NEXT:    s_endpgm

diff  --git a/llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll
index 8735bd3036c9d..373dd4bacd77e 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/postinc-iv-used-by-urem-and-udiv.ll
@@ -12,18 +12,19 @@ define i32 @test_pr38847() {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[LOOP]] ], [ 1, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi i32 [ [[LSR_IV_NEXT2:%.*]], [[LOOP]] ], [ 1, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[LOOP]] ], [ 1, [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV_NEXT2]] = add nsw i32 [[LSR_IV1]], -1
+; CHECK-NEXT:    [[LSR:%.*]] = trunc i32 [[LSR_IV_NEXT2]] to i8
 ; CHECK-NEXT:    call void @use(i64 [[LSR_IV]])
 ; CHECK-NEXT:    [[LSR_IV_NEXT]] = add nsw i64 [[LSR_IV]], -1
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[LSR_IV_NEXT]] to i8
-; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i8 [[TMP1]], -1
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i8 [[LSR]], -1
 ; CHECK-NEXT:    br i1 [[CMP2]], label [[LOOP]], label [[EXIT:%.*]]
 ; CHECK:       exit:
-; CHECK-NEXT:    [[TMP0:%.*]] = udiv i64 [[LSR_IV_NEXT]], 9
-; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 4294967287
-; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], [[LSR_IV_NEXT]]
-; CHECK-NEXT:    [[TMP:%.*]] = trunc i64 [[TMP2]] to i32
-; CHECK-NEXT:    ret i32 [[TMP]]
+; CHECK-NEXT:    [[TMP0:%.*]] = udiv i32 [[LSR_IV_NEXT2]], 9
+; CHECK-NEXT:    [[TMP1:%.*]] = mul nuw i32 [[TMP0]], 9
+; CHECK-NEXT:    [[TMP2:%.*]] = sub i32 [[LSR_IV_NEXT2]], [[TMP1]]
+; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
 entry:
   br label %loop
@@ -47,12 +48,12 @@ define i64 @test_pr58039() {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[LOOP]] ], [ 83, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[LOOP]] ], [ -4294967213, [[ENTRY:%.*]] ]
 ; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
 ; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[IV]] to i32
 ; CHECK-NEXT:    call void @use.i32(i32 [[TMP2]])
 ; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT:    [[LSR_IV_NEXT]] = add nuw nsw i64 [[LSR_IV]], 4294967295
+; CHECK-NEXT:    [[LSR_IV_NEXT]] = add nsw i64 [[LSR_IV]], 4294967295
 ; CHECK-NEXT:    br i1 false, label [[LOOP]], label [[EXIT:%.*]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[TMP0:%.*]] = udiv i64 [[LSR_IV_NEXT]], 12
@@ -93,25 +94,23 @@ define i32 @test_pr62852() {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[LSR_IV4:%.*]] = phi i64 [ [[LSR_IV_NEXT5:%.*]], [[LOOP]] ], [ -1, [[ENTRY:%.*]] ]
-; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi i64 [ [[LSR_IV_NEXT2:%.*]], [[LOOP]] ], [ 1, [[ENTRY]] ]
+; CHECK-NEXT:    [[LSR_IV1:%.*]] = phi i64 [ [[LSR_IV_NEXT2:%.*]], [[LOOP]] ], [ -1, [[ENTRY:%.*]] ]
 ; CHECK-NEXT:    [[LSR_IV:%.*]] = phi i64 [ [[LSR_IV_NEXT:%.*]], [[LOOP]] ], [ 2, [[ENTRY]] ]
-; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[LSR_IV4]], 1
+; CHECK-NEXT:    [[IV_1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[DEC_1:%.*]], [[LOOP]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[LSR_IV1]], 1
+; CHECK-NEXT:    [[DEC_1]] = add nsw i32 [[IV_1]], -1
 ; CHECK-NEXT:    call void @use(i64 [[TMP0]])
 ; CHECK-NEXT:    [[LSR_IV_NEXT]] = add nsw i64 [[LSR_IV]], -1
 ; CHECK-NEXT:    [[TMP:%.*]] = trunc i64 [[LSR_IV_NEXT]] to i32
-; CHECK-NEXT:    [[LSR_IV_NEXT2]] = add nsw i64 [[LSR_IV1]], -1
-; CHECK-NEXT:    [[LSR_IV_NEXT5]] = add nsw i64 [[LSR_IV4]], 1
+; CHECK-NEXT:    [[LSR_IV_NEXT2]] = add nsw i64 [[LSR_IV1]], 1
 ; CHECK-NEXT:    [[CMP6_1:%.*]] = icmp sgt i32 [[TMP]], 0
 ; CHECK-NEXT:    br i1 [[CMP6_1]], label [[LOOP]], label [[EXIT:%.*]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    call void @use(i64 [[LSR_IV_NEXT]])
-; CHECK-NEXT:    call void @use(i64 [[LSR_IV_NEXT5]])
-; CHECK-NEXT:    [[TMP1:%.*]] = udiv i64 [[LSR_IV_NEXT2]], 53
-; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 4294967243
-; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[TMP2]], [[LSR_IV_NEXT]]
-; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[TMP3]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP4]] to i32
+; CHECK-NEXT:    call void @use(i64 [[LSR_IV_NEXT2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = udiv i32 [[DEC_1]], 53
+; CHECK-NEXT:    [[TMP2:%.*]] = mul nuw i32 [[TMP1]], 53
+; CHECK-NEXT:    [[TMP3:%.*]] = sub i32 [[DEC_1]], [[TMP2]]
 ; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
 entry:


        


More information about the llvm-commits mailing list