[llvm] 7ff5dfb - [LICM] Support integer mul/add in hoistFPAssociation. (#67736)

via llvm-commits <llvm-commits at lists.llvm.org>
Mon Feb 12 14:59:52 PST 2024


Author: Craig Topper
Date: 2024-02-12T14:59:49-08:00
New Revision: 7ff5dfbaa0c971048da0f37ec6f05f5395562c21

URL: https://github.com/llvm/llvm-project/commit/7ff5dfbaa0c971048da0f37ec6f05f5395562c21
DIFF: https://github.com/llvm/llvm-project/commit/7ff5dfbaa0c971048da0f37ec6f05f5395562c21.diff

LOG: [LICM] Support integer mul/add in hoistFPAssociation. (#67736)

The reassociation this transform is trying to repair can happen for
integer types too.

This patch adds support for integer mul/add to hoistFPAssociation, and
the function has been renamed to hoistMulAddAssociation. I've used
separate statistics and limits for the integer case to allow tuning
flexibility.
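
For illustration, here is a minimal C sketch of the integer case this
now covers, mirroring the comments in the new test (the function names
are illustrative, not from the patch):

  #include <stdint.h>

  /* What the Reassociate pass produces: the loop-invariant factor
   * delta is multiplied inside the loop, on every iteration. */
  void before_licm(int i, uint64_t d1, uint64_t d2, uint64_t delta,
                   uint64_t *cells) {
    for (int j = 0; j <= i; j++)
      cells[j] = (d1 * cells[j + 1] + d2 * cells[j]) * delta;
  }

  /* What LICM can now rewrite it to: the invariant products
   * (d1 * delta, d2 * delta) are hoisted out of the loop. */
  void after_licm(int i, uint64_t d1, uint64_t d2, uint64_t delta,
                  uint64_t *cells) {
    const uint64_t d1d = d1 * delta; /* hoisted */
    const uint64_t d2d = d2 * delta; /* hoisted */
    for (int j = 0; j <= i; j++)
      cells[j] = d1d * cells[j + 1] + d2d * cells[j];
  }

Unlike the FP case, no fast-math flags are required here: integer mul
and add reassociate exactly under wrapping arithmetic.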

Added: 
    llvm/test/Transforms/LICM/expr-reassociate-int.ll

Modified: 
    llvm/lib/Transforms/Scalar/LICM.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index f3e40a5cb809bc..67295f1c76147c 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -110,6 +110,9 @@ STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
                             "and hoisted out of the loop");
 STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
                                     "reassociated and hoisted out of the loop");
+STATISTIC(NumIntAssociationsHoisted,
+          "Number of invariant int expressions "
+          "reassociated and hoisted out of the loop");
 
 /// Memory promotion is enabled by default.
 static cl::opt<bool>
@@ -135,6 +138,12 @@ static cl::opt<unsigned> FPAssociationUpperLimit(
         "Set upper limit for the number of transformations performed "
         "during a single round of hoisting the reassociated expressions."));
 
+cl::opt<unsigned> IntAssociationUpperLimit(
+    "licm-max-num-int-reassociations", cl::init(5U), cl::Hidden,
+    cl::desc(
+        "Set upper limit for the number of transformations performed "
+        "during a single round of hoisting the reassociated expressions."));
+
 // Experimental option to allow imprecision in LICM in pathological cases, in
 // exchange for faster compile. This is to be removed if MemorySSA starts to
 // address the same issue. LICM calls MemorySSAWalker's
@@ -2661,21 +2670,31 @@ static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
   return false;
 }
 
+static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
+                             unsigned FPOpcode) {
+  if (I->getOpcode() == IntOpcode)
+    return true;
+  if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
+      I->hasNoSignedZeros())
+    return true;
+  return false;
+}
+
 /// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
 /// A1, A2, ... and C are loop invariants into expressions like
 /// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
 /// invariant expressions. This function returns true only if any hoisting has
 /// actually occurred.
-static bool hoistFPAssociation(Instruction &I, Loop &L,
-                               ICFLoopSafetyInfo &SafetyInfo,
-                               MemorySSAUpdater &MSSAU, AssumptionCache *AC,
-                               DominatorTree *DT) {
+static bool hoistMulAddAssociation(Instruction &I, Loop &L,
+                                   ICFLoopSafetyInfo &SafetyInfo,
+                                   MemorySSAUpdater &MSSAU, AssumptionCache *AC,
+                                   DominatorTree *DT) {
   using namespace PatternMatch;
-  Value *VariantOp = nullptr, *InvariantOp = nullptr;
 
-  if (!match(&I, m_FMul(m_Value(VariantOp), m_Value(InvariantOp))) ||
-      !I.hasAllowReassoc() || !I.hasNoSignedZeros())
+  if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
     return false;
+  Value *VariantOp = I.getOperand(0);
+  Value *InvariantOp = I.getOperand(1);
   if (L.isLoopInvariant(VariantOp))
     std::swap(VariantOp, InvariantOp);
   if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
@@ -2689,15 +2708,17 @@ static bool hoistFPAssociation(Instruction &I, Loop &L,
     Worklist.push_back(VariantBinOp);
   while (!Worklist.empty()) {
     BinaryOperator *BO = Worklist.pop_back_val();
-    if (!BO->hasOneUse() || !BO->hasAllowReassoc() || !BO->hasNoSignedZeros())
+    if (!BO->hasOneUse())
       return false;
-    BinaryOperator *Op0, *Op1;
-    if (match(BO, m_FAdd(m_BinOp(Op0), m_BinOp(Op1)))) {
-      Worklist.push_back(Op0);
-      Worklist.push_back(Op1);
+    if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
+        isa<BinaryOperator>(BO->getOperand(0)) &&
+        isa<BinaryOperator>(BO->getOperand(1))) {
+      Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
+      Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
       continue;
     }
-    if (BO->getOpcode() != Instruction::FMul || L.isLoopInvariant(BO))
+    if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
+        L.isLoopInvariant(BO))
       return false;
     Use &U0 = BO->getOperandUse(0);
     Use &U1 = BO->getOperandUse(1);
@@ -2707,7 +2728,10 @@ static bool hoistFPAssociation(Instruction &I, Loop &L,
       Changes.push_back(&U1);
     else
       return false;
-    if (Changes.size() > FPAssociationUpperLimit)
+    unsigned Limit = I.getType()->isIntOrIntVectorTy()
+                         ? IntAssociationUpperLimit
+                         : FPAssociationUpperLimit;
+    if (Changes.size() > Limit)
       return false;
   }
   if (Changes.empty())
@@ -2720,7 +2744,12 @@ static bool hoistFPAssociation(Instruction &I, Loop &L,
   for (auto *U : Changes) {
     assert(L.isLoopInvariant(U->get()));
     Instruction *Ins = cast<Instruction>(U->getUser());
-    U->set(Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul"));
+    Value *Mul;
+    if (I.getType()->isIntOrIntVectorTy())
+      Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
+    else
+      Mul = Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul");
+    U->set(Mul);
   }
   I.replaceAllUsesWith(VariantOp);
   eraseInstruction(I, SafetyInfo, MSSAU);
@@ -2754,9 +2783,12 @@ static bool hoistArithmetics(Instruction &I, Loop &L,
     return true;
   }
 
-  if (hoistFPAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
+  if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
     ++NumHoisted;
-    ++NumFPAssociationsHoisted;
+    if (I.getType()->isIntOrIntVectorTy())
+      ++NumIntAssociationsHoisted;
+    else
+      ++NumFPAssociationsHoisted;
     return true;
   }
 

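As a usage note, the new cap is exposed through the hidden
-licm-max-num-int-reassociations option, so it can be tuned from the
opt command line exactly as the RUN lines of the test below do
(input.ll is a placeholder file name):

  opt -passes='loop-mssa(licm)' -licm-max-num-int-reassociations=1 -S input.ll
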
diff --git a/llvm/test/Transforms/LICM/expr-reassociate-int.ll b/llvm/test/Transforms/LICM/expr-reassociate-int.ll
new file mode 100644
index 00000000000000..63548974fb3185
--- /dev/null
+++ b/llvm/test/Transforms/LICM/expr-reassociate-int.ll
@@ -0,0 +1,364 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -passes='loop-mssa(licm)' -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CONSTRAINED
+; RUN: opt -passes='loop-mssa(licm)' -licm-max-num-int-reassociations=1 -S < %s | FileCheck %s --check-prefixes=CHECK,CONSTRAINED
+
+;
+; A simple loop:
+;
+;  int j;
+;
+;  for (j = 0; j <= i; j++)
+;    cells[j] = d1 * cells[j + 1] * delta;
+;
+; ...should be transformed by the LICM pass into this:
+;
+;  int j;
+;  const uint64_t d1d = d1 * delta;
+;
+;  for (j = 0; j <= i; j++)
+;    cells[j] = d1d * cells[j + 1];
+;
+
+define void @innermost_loop_1d_shouldhoist(i32 %i, i64 %d1, i64 %delta, ptr %cells) {
+; CHECK-LABEL: define void @innermost_loop_1d_shouldhoist
+; CHECK-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[MUL_1:%.*]] = mul i64 [[DELTA]], [[D1]]
+; CHECK-NEXT:    br label [[FOR_COND:%.*]]
+; CHECK:       for.cond:
+; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; CHECK-NEXT:    [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CHECK-NEXT:    [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; CHECK-NEXT:    [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; CHECK-NEXT:    [[MUL_2:%.*]] = mul i64 [[MUL_1]], [[CELL_1]]
+; CHECK-NEXT:    [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; CHECK-NEXT:    [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; CHECK-NEXT:    store i64 [[MUL_2]], ptr [[ARRAYIDX_J]], align 8
+; CHECK-NEXT:    br label [[FOR_COND]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.cond
+
+for.cond:
+  %j = phi i32 [ 0, %entry ], [ %add.j.1, %for.body ]
+  %cmp.not = icmp sgt i32 %j, %i
+  br i1 %cmp.not, label %for.end, label %for.body
+
+for.body:
+  %add.j.1 = add nuw nsw i32 %j, 1
+  %idxprom.j.1 = zext i32 %add.j.1 to i64
+  %arrayidx.j.1 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.1
+  %cell.1 = load i64, ptr %arrayidx.j.1, align 8
+  %mul.1 = mul i64 %delta, %d1
+  %mul.2 = mul i64 %mul.1, %cell.1
+  %idxprom.j = zext i32 %j to i64
+  %arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
+  store i64 %mul.2, ptr %arrayidx.j, align 8
+  br label %for.cond
+
+for.end:
+  ret void
+}
+
+;
+; The following loop will be modified by the 'Reassociate expressions' pass,
+;
+;  int j;
+;  const uint64_t d1d = d1 * delta;
+;  const uint64_t d2d = d2 * delta;
+;
+;  for (j = 0; j <= i; j++)
+;    cells[j] = d1d * cells[j + 1] + d2d * cells[j];
+;
+; ...into this:
+;
+;  int j;
+;
+;  for (j = 0; j <= i; j++)
+;    cells[j] = (d1 * cells[j + 1] + d2 * cells[j]) * delta;
+;
+; We expect the LICM pass to undo this transformation.
+;
+
+define void @innermost_loop_2d(i32 %i, i64 %d1, i64 %d2, i64 %delta, ptr %cells) {
+; NOT_CONSTRAINED-LABEL: define void @innermost_loop_2d
+; NOT_CONSTRAINED-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; NOT_CONSTRAINED-NEXT:  entry:
+; NOT_CONSTRAINED-NEXT:    [[FACTOR_OP_MUL:%.*]] = mul i64 [[D1]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT:    [[FACTOR_OP_MUL1:%.*]] = mul i64 [[D2]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT:    br label [[FOR_COND:%.*]]
+; NOT_CONSTRAINED:       for.cond:
+; NOT_CONSTRAINED-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; NOT_CONSTRAINED-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; NOT_CONSTRAINED-NEXT:    br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; NOT_CONSTRAINED:       for.body:
+; NOT_CONSTRAINED-NEXT:    [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; NOT_CONSTRAINED-NEXT:    [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; NOT_CONSTRAINED-NEXT:    [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; NOT_CONSTRAINED-NEXT:    [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; NOT_CONSTRAINED-NEXT:    [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[FACTOR_OP_MUL]]
+; NOT_CONSTRAINED-NEXT:    [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; NOT_CONSTRAINED-NEXT:    [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; NOT_CONSTRAINED-NEXT:    [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; NOT_CONSTRAINED-NEXT:    [[MUL_2:%.*]] = mul i64 [[CELL_2]], [[FACTOR_OP_MUL1]]
+; NOT_CONSTRAINED-NEXT:    [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
+; NOT_CONSTRAINED-NEXT:    store i64 [[REASS_ADD]], ptr [[ARRAYIDX_J]], align 8
+; NOT_CONSTRAINED-NEXT:    br label [[FOR_COND]]
+; NOT_CONSTRAINED:       for.end:
+; NOT_CONSTRAINED-NEXT:    ret void
+;
+; CONSTRAINED-LABEL: define void @innermost_loop_2d
+; CONSTRAINED-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; CONSTRAINED-NEXT:  entry:
+; CONSTRAINED-NEXT:    br label [[FOR_COND:%.*]]
+; CONSTRAINED:       for.cond:
+; CONSTRAINED-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; CONSTRAINED-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; CONSTRAINED-NEXT:    br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CONSTRAINED:       for.body:
+; CONSTRAINED-NEXT:    [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; CONSTRAINED-NEXT:    [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CONSTRAINED-NEXT:    [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; CONSTRAINED-NEXT:    [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; CONSTRAINED-NEXT:    [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[D1]]
+; CONSTRAINED-NEXT:    [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; CONSTRAINED-NEXT:    [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; CONSTRAINED-NEXT:    [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; CONSTRAINED-NEXT:    [[MUL_2:%.*]] = mul i64 [[CELL_2]], [[D2]]
+; CONSTRAINED-NEXT:    [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
+; CONSTRAINED-NEXT:    [[REASS_MUL:%.*]] = mul i64 [[REASS_ADD]], [[DELTA]]
+; CONSTRAINED-NEXT:    store i64 [[REASS_MUL]], ptr [[ARRAYIDX_J]], align 8
+; CONSTRAINED-NEXT:    br label [[FOR_COND]]
+; CONSTRAINED:       for.end:
+; CONSTRAINED-NEXT:    ret void
+;
+entry:
+  br label %for.cond
+
+for.cond:
+  %j = phi i32 [ 0, %entry ], [ %add.j.1, %for.body ]
+  %cmp.not = icmp sgt i32 %j, %i
+  br i1 %cmp.not, label %for.end, label %for.body
+
+for.body:
+  %add.j.1 = add nuw nsw i32 %j, 1
+  %idxprom.j.1 = zext i32 %add.j.1 to i64
+  %arrayidx.j.1 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.1
+  %cell.1 = load i64, ptr %arrayidx.j.1, align 8
+  %mul.1 = mul i64 %cell.1, %d1
+  %idxprom.j = zext i32 %j to i64
+  %arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
+  %cell.2 = load i64, ptr %arrayidx.j, align 8
+  %mul.2 = mul i64 %cell.2, %d2
+  %reass.add = add i64 %mul.2, %mul.1
+  %reass.mul = mul i64 %reass.add, %delta
+  store i64 %reass.mul, ptr %arrayidx.j, align 8
+  br label %for.cond
+
+for.end:
+  ret void
+}
+
+;
+; The following loop will be modified by the 'Reassociate expressions' pass,
+;
+;  int j;
+;  const uint64_t d1d = d1 * delta;
+;  const uint64_t d2d = d2 * delta;
+;  const uint64_t d3d = d3 * delta;
+;
+;  for (j = 0; j <= i; j++)
+;    cells[j] = d1d * cells[j + 1] + d2d * cells[j] + d3d * cells[j + 2];
+;
+; ...into this:
+;
+;  int j;
+;
+;  for (j = 0; j <= i; j++)
+;    cells[j] = (d1 * cells[j + 1] + d2 * cells[j] + d3 * cells[j + 2]) * delta;
+;
+; We expect the LICM pass to undo this transformation.
+;
+
+
+define void @innermost_loop_3d(i32 %i, i64 %d1, i64 %d2, i64 %d3, i64 %delta, ptr %cells) {
+; NOT_CONSTRAINED-LABEL: define void @innermost_loop_3d
+; NOT_CONSTRAINED-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[D3:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; NOT_CONSTRAINED-NEXT:  entry:
+; NOT_CONSTRAINED-NEXT:    [[FACTOR_OP_MUL:%.*]] = mul i64 [[D3]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT:    [[FACTOR_OP_MUL1:%.*]] = mul i64 [[D1]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT:    [[FACTOR_OP_MUL2:%.*]] = mul i64 [[D2]], [[DELTA]]
+; NOT_CONSTRAINED-NEXT:    br label [[FOR_COND:%.*]]
+; NOT_CONSTRAINED:       for.cond:
+; NOT_CONSTRAINED-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; NOT_CONSTRAINED-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; NOT_CONSTRAINED-NEXT:    br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; NOT_CONSTRAINED:       for.body:
+; NOT_CONSTRAINED-NEXT:    [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; NOT_CONSTRAINED-NEXT:    [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; NOT_CONSTRAINED-NEXT:    [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; NOT_CONSTRAINED-NEXT:    [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; NOT_CONSTRAINED-NEXT:    [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[FACTOR_OP_MUL1]]
+; NOT_CONSTRAINED-NEXT:    [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; NOT_CONSTRAINED-NEXT:    [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; NOT_CONSTRAINED-NEXT:    [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; NOT_CONSTRAINED-NEXT:    [[MUL_2:%.*]] = mul i64 [[CELL_2]], [[FACTOR_OP_MUL2]]
+; NOT_CONSTRAINED-NEXT:    [[ADD_J_2:%.*]] = add nuw nsw i32 [[J]], 2
+; NOT_CONSTRAINED-NEXT:    [[IDXPROM_J_2:%.*]] = zext i32 [[ADD_J_2]] to i64
+; NOT_CONSTRAINED-NEXT:    [[ARRAYIDX_J_2:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_2]]
+; NOT_CONSTRAINED-NEXT:    [[CELL_3:%.*]] = load i64, ptr [[ARRAYIDX_J_2]], align 8
+; NOT_CONSTRAINED-NEXT:    [[MUL_3:%.*]] = mul i64 [[CELL_3]], [[FACTOR_OP_MUL]]
+; NOT_CONSTRAINED-NEXT:    [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
+; NOT_CONSTRAINED-NEXT:    [[REASS_ADD1:%.*]] = add i64 [[REASS_ADD]], [[MUL_3]]
+; NOT_CONSTRAINED-NEXT:    store i64 [[REASS_ADD1]], ptr [[ARRAYIDX_J_2]], align 8
+; NOT_CONSTRAINED-NEXT:    br label [[FOR_COND]]
+; NOT_CONSTRAINED:       for.end:
+; NOT_CONSTRAINED-NEXT:    ret void
+;
+; CONSTRAINED-LABEL: define void @innermost_loop_3d
+; CONSTRAINED-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[D3:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; CONSTRAINED-NEXT:  entry:
+; CONSTRAINED-NEXT:    br label [[FOR_COND:%.*]]
+; CONSTRAINED:       for.cond:
+; CONSTRAINED-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; CONSTRAINED-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; CONSTRAINED-NEXT:    br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CONSTRAINED:       for.body:
+; CONSTRAINED-NEXT:    [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; CONSTRAINED-NEXT:    [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CONSTRAINED-NEXT:    [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; CONSTRAINED-NEXT:    [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; CONSTRAINED-NEXT:    [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[D1]]
+; CONSTRAINED-NEXT:    [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; CONSTRAINED-NEXT:    [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; CONSTRAINED-NEXT:    [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; CONSTRAINED-NEXT:    [[MUL_2:%.*]] = mul i64 [[CELL_2]], [[D2]]
+; CONSTRAINED-NEXT:    [[ADD_J_2:%.*]] = add nuw nsw i32 [[J]], 2
+; CONSTRAINED-NEXT:    [[IDXPROM_J_2:%.*]] = zext i32 [[ADD_J_2]] to i64
+; CONSTRAINED-NEXT:    [[ARRAYIDX_J_2:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_2]]
+; CONSTRAINED-NEXT:    [[CELL_3:%.*]] = load i64, ptr [[ARRAYIDX_J_2]], align 8
+; CONSTRAINED-NEXT:    [[MUL_3:%.*]] = mul i64 [[CELL_3]], [[D3]]
+; CONSTRAINED-NEXT:    [[REASS_ADD:%.*]] = add i64 [[MUL_2]], [[MUL_1]]
+; CONSTRAINED-NEXT:    [[REASS_ADD1:%.*]] = add i64 [[REASS_ADD]], [[MUL_3]]
+; CONSTRAINED-NEXT:    [[REASS_MUL:%.*]] = mul i64 [[REASS_ADD1]], [[DELTA]]
+; CONSTRAINED-NEXT:    store i64 [[REASS_MUL]], ptr [[ARRAYIDX_J_2]], align 8
+; CONSTRAINED-NEXT:    br label [[FOR_COND]]
+; CONSTRAINED:       for.end:
+; CONSTRAINED-NEXT:    ret void
+;
+entry:
+  br label %for.cond
+
+for.cond:
+  %j = phi i32 [ 0, %entry ], [ %add.j.1, %for.body ]
+  %cmp.not = icmp sgt i32 %j, %i
+  br i1 %cmp.not, label %for.end, label %for.body
+
+for.body:
+  %add.j.1 = add nuw nsw i32 %j, 1
+  %idxprom.j.1 = zext i32 %add.j.1 to i64
+  %arrayidx.j.1 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.1
+  %cell.1 = load i64, ptr %arrayidx.j.1, align 8
+  %mul.1 = mul i64 %cell.1, %d1
+  %idxprom.j = zext i32 %j to i64
+  %arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
+  %cell.2 = load i64, ptr %arrayidx.j, align 8
+  %mul.2 = mul i64 %cell.2, %d2
+  %add.j.2 = add nuw nsw i32 %j, 2
+  %idxprom.j.2 = zext i32 %add.j.2 to i64
+  %arrayidx.j.2 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.2
+  %cell.3 = load i64, ptr %arrayidx.j.2, align 8
+  %mul.3 = mul i64 %cell.3, %d3
+  %reass.add = add i64 %mul.2, %mul.1
+  %reass.add1 = add i64 %reass.add, %mul.3
+  %reass.mul = mul i64 %reass.add1, %delta
+  store i64 %reass.mul, ptr %arrayidx.j.2, align 8
+  br label %for.cond
+
+for.end:
+  ret void
+}
+
+;
+; The following loop will not be modified by the LICM pass:
+;
+;  int j;
+;
+;  for (j = 0; j <= i; j++)
+;    cells[j] = (d1 * cells[j + 1] + d2 * cells[j] +
+;                cells[j] * cells[j + 1]) * delta;
+;
+; This case differs as one of the multiplications involves no invariants.
+;
+
+define void @innermost_loop_3d_reassociated_different(i32 %i, i64 %d1, i64 %d2, i64 %delta, ptr %cells) {
+; CHECK-LABEL: define void @innermost_loop_3d_reassociated_different
+; CHECK-SAME: (i32 [[I:%.*]], i64 [[D1:%.*]], i64 [[D2:%.*]], i64 [[DELTA:%.*]], ptr [[CELLS:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_COND:%.*]]
+; CHECK:       for.cond:
+; CHECK-NEXT:    [[J:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_J_1:%.*]], [[FOR_BODY:%.*]] ]
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[J]], [[I]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[ADD_J_1]] = add nuw nsw i32 [[J]], 1
+; CHECK-NEXT:    [[IDXPROM_J_1:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CHECK-NEXT:    [[ARRAYIDX_J_1:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_1]]
+; CHECK-NEXT:    [[CELL_1:%.*]] = load i64, ptr [[ARRAYIDX_J_1]], align 8
+; CHECK-NEXT:    [[IDXPROM_J_2:%.*]] = zext i32 [[ADD_J_1]] to i64
+; CHECK-NEXT:    [[ARRAYIDX_J_2:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J_2]]
+; CHECK-NEXT:    [[CELL_2:%.*]] = load i64, ptr [[ARRAYIDX_J_2]], align 8
+; CHECK-NEXT:    [[CELL_3:%.*]] = load i64, ptr [[ARRAYIDX_J_2]], align 8
+; CHECK-NEXT:    [[IDXPROM_J:%.*]] = zext i32 [[J]] to i64
+; CHECK-NEXT:    [[ARRAYIDX_J:%.*]] = getelementptr inbounds i64, ptr [[CELLS]], i64 [[IDXPROM_J]]
+; CHECK-NEXT:    [[CELL_4:%.*]] = load i64, ptr [[ARRAYIDX_J]], align 8
+; CHECK-NEXT:    [[MUL_1:%.*]] = mul i64 [[CELL_1]], [[D1]]
+; CHECK-NEXT:    [[MUL_2:%.*]] = mul i64 [[CELL_4]], [[D2]]
+; CHECK-NEXT:    [[EXTRA_MUL:%.*]] = mul i64 [[CELL_3]], [[CELL_2]]
+; CHECK-NEXT:    [[REASS_ADD:%.*]] = add i64 [[EXTRA_MUL]], [[MUL_1]]
+; CHECK-NEXT:    [[EXTRA_ADD:%.*]] = add i64 [[REASS_ADD]], [[MUL_2]]
+; CHECK-NEXT:    [[REASS_MUL:%.*]] = mul i64 [[EXTRA_ADD]], [[DELTA]]
+; CHECK-NEXT:    store i64 [[REASS_MUL]], ptr [[ARRAYIDX_J]], align 8
+; CHECK-NEXT:    br label [[FOR_COND]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.cond
+
+for.cond:
+  %j = phi i32 [ 0, %entry ], [ %add.j.1, %for.body ]
+  %cmp.not = icmp sgt i32 %j, %i
+  br i1 %cmp.not, label %for.end, label %for.body
+
+for.body:
+  %add.j.1 = add nuw nsw i32 %j, 1
+  %idxprom.j.1 = zext i32 %add.j.1 to i64
+  %arrayidx.j.1 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.1
+  %cell.1 = load i64, ptr %arrayidx.j.1, align 8
+  %idxprom.j.2 = zext i32 %add.j.1 to i64
+  %arrayidx.j.2 = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j.2
+  %cell.2 = load i64, ptr %arrayidx.j.2, align 8
+  %idxprom.j.3 = zext i32 %add.j.1 to i64
+  %cell.3 = load i64, ptr %arrayidx.j.2, align 8
+  %idxprom.j = zext i32 %j to i64
+  %arrayidx.j = getelementptr inbounds i64, ptr %cells, i64 %idxprom.j
+  %cell.4 = load i64, ptr %arrayidx.j, align 8
+  %mul.1 = mul i64 %cell.1, %d1
+  %mul.2 = mul i64 %cell.4, %d2
+  %extra.mul = mul i64 %cell.3, %cell.2
+  %reass.add = add i64 %extra.mul, %mul.1
+  %extra.add = add i64 %reass.add, %mul.2
+  %reass.mul = mul i64 %extra.add, %delta
+  store i64 %reass.mul, ptr %arrayidx.j, align 8
+  br label %for.cond
+
+for.end:
+  ret void
+}


        

