[llvm-branch-commits] [llvm-branch] r109455 - in /llvm/branches/Apple/Morbo: lib/Analysis/ScalarEvolutionNormalization.cpp test/CodeGen/X86/lsr-nonaffine.ll test/CodeGen/X86/lsr-normalization.ll
Dan Gohman
gohman at apple.com
Mon Jul 26 15:15:01 PDT 2010
Author: djg
Date: Mon Jul 26 17:15:01 2010
New Revision: 109455
URL: http://llvm.org/viewvc/llvm-project?rev=109455&view=rev
Log:
$ svn merge -c 105480 https://djg@llvm.org/svn/llvm-project/llvm/trunk
--- Merging r105480 into '.':
A test/CodeGen/X86/lsr-nonaffine.ll
U lib/Analysis/ScalarEvolutionNormalization.cpp
$ svn merge -c 108793 https://djg@llvm.org/svn/llvm-project/llvm/trunk
--- Merging r108793 into '.':
G lib/Analysis/ScalarEvolutionNormalization.cpp
$ svn merge -c 108848 https://djg@llvm.org/svn/llvm-project/llvm/trunk
--- Merging r108848 into '.':
G lib/Analysis/ScalarEvolutionNormalization.cpp
$ svn merge -c 108850 https://djg@llvm.org/svn/llvm-project/llvm/trunk
--- Merging r108850 into '.':
G lib/Analysis/ScalarEvolutionNormalization.cpp
$ svn merge -c 108863 https://djg@llvm.org/svn/llvm-project/llvm/trunk
--- Merging r108863 into '.':
A test/CodeGen/X86/lsr-normalization.ll
G lib/Analysis/ScalarEvolutionNormalization.cpp
Added:
llvm/branches/Apple/Morbo/test/CodeGen/X86/lsr-nonaffine.ll
- copied unchanged from r105480, llvm/trunk/test/CodeGen/X86/lsr-nonaffine.ll
llvm/branches/Apple/Morbo/test/CodeGen/X86/lsr-normalization.ll
- copied unchanged from r108863, llvm/trunk/test/CodeGen/X86/lsr-normalization.ll
Modified:
llvm/branches/Apple/Morbo/lib/Analysis/ScalarEvolutionNormalization.cpp
Modified: llvm/branches/Apple/Morbo/lib/Analysis/ScalarEvolutionNormalization.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Morbo/lib/Analysis/ScalarEvolutionNormalization.cpp?rev=109455&r1=109454&r2=109455&view=diff
==============================================================================
--- llvm/branches/Apple/Morbo/lib/Analysis/ScalarEvolutionNormalization.cpp (original)
+++ llvm/branches/Apple/Morbo/lib/Analysis/ScalarEvolutionNormalization.cpp Mon Jul 26 17:15:01 2010
@@ -26,7 +26,7 @@
/// post-inc value when we cannot) or it can end up adding extra live-ranges to
/// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
/// should use the post-inc value).
-static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
+static bool IVUseShouldUsePostIncValue(Instruction *User, Value *Operand,
const Loop *L, DominatorTree *DT) {
// If the user is in the loop, use the preinc value.
if (L->contains(User)) return false;
@@ -45,20 +45,17 @@
// their uses occur in the predecessor block, not the block the PHI lives in)
// should still use the post-inc value. Check for this case now.
PHINode *PN = dyn_cast<PHINode>(User);
- if (!PN) return false; // not a phi, not dominated by latch block.
+ if (!PN || !Operand) return false; // not a phi, not dominated by latch block.
- // Look at all of the uses of IV by the PHI node. If any use corresponds to
- // a block that is not dominated by the latch block, give up and use the
+ // Look at all of the uses of Operand by the PHI node. If any use corresponds
+ // to a block that is not dominated by the latch block, give up and use the
// preincremented value.
- unsigned NumUses = 0;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (PN->getIncomingValue(i) == IV) {
- ++NumUses;
- if (!DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
- return false;
- }
+ if (PN->getIncomingValue(i) == Operand &&
+ !DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
+ return false;
- // Okay, all uses of IV by PN are in predecessor blocks that really are
+ // Okay, all uses of Operand by PN are in predecessor blocks that really are
// dominated by the latch block. Use the post-incremented value.
return true;
}
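
As a rough illustration of the case this function is about, here is a hypothetical C sketch (made up for this note; the merged tests use different IR): a bottom-tested loop whose induction variable is used both inside the loop and after it. Every path from the loop body to the code after the loop goes through the latch, so the use after the loop can be expressed with the post-incremented value instead of forcing the pre-incremented value to stay live out of the loop.

  /* Hypothetical sketch (assumes n_elems != 0).  Inside the loop, a[i] wants
     the pre-incremented value of i, while the exit test and the return after
     the loop see the post-incremented value.  Rewriting the out-of-loop use
     in terms of the incremented register avoids keeping both values of i
     live across the latch (the reg-reg copy / extra live-range cost the
     comment above describes). */
  unsigned sum_and_count(const unsigned *a, unsigned n_elems,
                         unsigned *sum_out) {
    unsigned sum = 0, i = 0;
    do {
      sum += a[i];          /* in-loop use of the pre-incremented i */
      ++i;                  /* the increment lives in the latch block */
    } while (i != n_elems);
    *sum_out = sum;
    return i;               /* use outside the loop, dominated by the latch:
                               the post-incremented value is the natural one */
  }
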
@@ -72,6 +69,7 @@
DominatorTree &DT) {
if (isa<SCEVConstant>(S) || isa<SCEVUnknown>(S))
return S;
+
if (const SCEVCastExpr *X = dyn_cast<SCEVCastExpr>(S)) {
const SCEV *O = X->getOperand();
const SCEV *N = TransformForPostIncUse(Kind, O, User, OperandValToReplace,
@@ -85,9 +83,64 @@
}
return S;
}
+
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ // An addrec. This is the interesting part.
+ SmallVector<const SCEV *, 8> Operands;
+ const Loop *L = AR->getLoop();
+ // The addrec conceptually uses its operands at loop entry.
+ Instruction *LUser = L->getHeader()->begin();
+ // Transform each operand.
+ for (SCEVNAryExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
+ I != E; ++I) {
+ const SCEV *O = *I;
+ const SCEV *N = TransformForPostIncUse(Kind, O, LUser, 0, Loops, SE, DT);
+ Operands.push_back(N);
+ }
+ const SCEV *Result = SE.getAddRecExpr(Operands, L);
+ switch (Kind) {
+ default: llvm_unreachable("Unexpected transform name!");
+ case NormalizeAutodetect:
+ if (IVUseShouldUsePostIncValue(User, OperandValToReplace, L, &DT)) {
+ const SCEV *TransformedStep =
+ TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
+ User, OperandValToReplace, Loops, SE, DT);
+ Result = SE.getMinusSCEV(Result, TransformedStep);
+ Loops.insert(L);
+ }
+#ifdef XDEBUG
+ assert(S == TransformForPostIncUse(Denormalize, Result,
+ User, OperandValToReplace,
+ Loops, SE, DT) &&
+ "SCEV normalization is not invertible!");
+#endif
+ break;
+ case Normalize:
+ if (Loops.count(L)) {
+ const SCEV *TransformedStep =
+ TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
+ User, OperandValToReplace, Loops, SE, DT);
+ Result = SE.getMinusSCEV(Result, TransformedStep);
+ }
+#ifdef XDEBUG
+ assert(S == TransformForPostIncUse(Denormalize, Result,
+ User, OperandValToReplace,
+ Loops, SE, DT) &&
+ "SCEV normalization is not invertible!");
+#endif
+ break;
+ case Denormalize:
+ if (Loops.count(L))
+ Result = cast<SCEVAddRecExpr>(Result)->getPostIncExpr(SE);
+ break;
+ }
+ return Result;
+ }
+
if (const SCEVNAryExpr *X = dyn_cast<SCEVNAryExpr>(S)) {
SmallVector<const SCEV *, 8> Operands;
bool Changed = false;
+ // Transform each operand.
for (SCEVNAryExpr::op_iterator I = X->op_begin(), E = X->op_end();
I != E; ++I) {
const SCEV *O = *I;
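
To make the new addrec handling above concrete, a small worked example with made-up values (not taken from lsr-normalization.ll), using SCEV's {start,+,step}<%loop> notation:

  S (the IV's expression in %loop):          {0,+,4}<%loop>
  NormalizeAutodetect, post-inc use found:   {0,+,4} - 4  =  {-4,+,4}<%loop>, and %loop is added to Loops
  Denormalize, with %loop in Loops:          getPostIncExpr({-4,+,4}) = {-4,+,4} + 4 = {0,+,4}<%loop>

The round trip back to {0,+,4} is exactly what the XDEBUG assertions check. The recursive transform of the step is a no-op in this example because the step is a SCEVConstant; for a non-constant step it is transformed the same way before being subtracted or added back.
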
@@ -96,34 +149,7 @@
Changed |= N != O;
Operands.push_back(N);
}
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
- // An addrec. This is the interesting part.
- const Loop *L = AR->getLoop();
- const SCEV *Result = SE.getAddRecExpr(Operands, L);
- switch (Kind) {
- default: llvm_unreachable("Unexpected transform name!");
- case NormalizeAutodetect:
- if (Instruction *OI = dyn_cast<Instruction>(OperandValToReplace))
- if (IVUseShouldUsePostIncValue(User, OI, L, &DT)) {
- Result = SE.getMinusSCEV(Result, AR->getStepRecurrence(SE));
- Loops.insert(L);
- }
- break;
- case Normalize:
- if (Loops.count(L))
- Result = SE.getMinusSCEV(Result, AR->getStepRecurrence(SE));
- break;
- case Denormalize:
- if (Loops.count(L)) {
- const SCEV *TransformedStep =
- TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
- User, OperandValToReplace, Loops, SE, DT);
- Result = SE.getAddExpr(Result, TransformedStep);
- }
- break;
- }
- return Result;
- }
+ // If any operand actually changed, return a transformed result.
if (Changed)
switch (S->getSCEVType()) {
case scAddExpr: return SE.getAddExpr(Operands);
@@ -134,6 +160,7 @@
}
return S;
}
+
if (const SCEVUDivExpr *X = dyn_cast<SCEVUDivExpr>(S)) {
const SCEV *LO = X->getLHS();
const SCEV *RO = X->getRHS();
@@ -145,6 +172,7 @@
return SE.getUDivExpr(LN, RN);
return S;
}
+
llvm_unreachable("Unexpected SCEV kind!");
return 0;
}