[llvm-commits] CVS: llvm/lib/Transforms/Scalar/InstructionCombining.cpp Reassociate.cpp TailRecursionElimination.cpp
Jeff Cohen
jeffc at jolt-lang.org
Tue Jul 26 23:12:58 PDT 2005
Changes in directory llvm/lib/Transforms/Scalar:
InstructionCombining.cpp updated: 1.359 -> 1.360
Reassociate.cpp updated: 1.49 -> 1.50
TailRecursionElimination.cpp updated: 1.17 -> 1.18
---
Log message:
Eliminate all remaining tabs and trailing spaces.
---
Diffs of the changes: (+18 -18)
InstructionCombining.cpp | 14 +++++++-------
Reassociate.cpp | 20 ++++++++++----------
TailRecursionElimination.cpp | 2 +-
3 files changed, 18 insertions(+), 18 deletions(-)
Index: llvm/lib/Transforms/Scalar/InstructionCombining.cpp
diff -u llvm/lib/Transforms/Scalar/InstructionCombining.cpp:1.359 llvm/lib/Transforms/Scalar/InstructionCombining.cpp:1.360
--- llvm/lib/Transforms/Scalar/InstructionCombining.cpp:1.359 Wed Jul 20 13:49:28 2005
+++ llvm/lib/Transforms/Scalar/InstructionCombining.cpp Wed Jul 27 01:12:34 2005
@@ -1319,7 +1319,7 @@
static bool MaskedValueIsZero(Value *V, ConstantIntegral *Mask) {
// Note, we cannot consider 'undef' to be "IsZero" here. The problem is that
// we cannot optimize based on the assumption that it is zero without changing
- // to to an explicit zero. If we don't change it to zero, other code could
+ // to to an explicit zero. If we don't change it to zero, other code could
// optimized based on the contradictory assumption that it is non-zero.
// Because instcombine aggressively folds operations with undef args anyway,
// this won't lose us code quality.
@@ -2308,7 +2308,7 @@
// compare the base pointer.
if (PtrBase != GEPRHS->getOperand(0)) {
bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
- IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
+ IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
GEPRHS->getOperand(0)->getType();
if (IndicesTheSame)
for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
@@ -3103,7 +3103,7 @@
}
}
- // Finally, return the value computed.
+ // Finally, return the value computed.
if (SCI.getOpcode() == Instruction::SetLT) {
return ReplaceInstUsesWith(SCI, Result);
} else {
@@ -3167,7 +3167,7 @@
return new CastInst(V, I.getType());
}
}
-
+
if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(Op1)) {
// shl uint X, 32 = 0 and shr ubyte Y, 9 = 0, ... just don't eliminate shr
// of a signed value.
@@ -3623,7 +3623,7 @@
if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (Op1C->getRawValue() == 0) {
// If the input only has the low bit set, simplify directly.
- Constant *Not1 =
+ Constant *Not1 =
ConstantExpr::getNot(ConstantInt::get(Op0->getType(), 1));
// cast (X != 0) to int --> X if X&~1 == 0
if (MaskedValueIsZero(Op0, cast<ConstantIntegral>(Not1))) {
@@ -3666,7 +3666,7 @@
if ((Op1C->getRawValue() & Op1C->getRawValue()-1) == 0) {
// cast (X == 1) to int -> X iff X has only the low bit set.
if (Op1C->getRawValue() == 1) {
- Constant *Not1 =
+ Constant *Not1 =
ConstantExpr::getNot(ConstantInt::get(Op0->getType(), 1));
if (MaskedValueIsZero(Op0, cast<ConstantIntegral>(Not1))) {
if (CI.getType() == Op0->getType())
@@ -5247,7 +5247,7 @@
E = df_ext_end(&F.front(), Visited); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
WorkList.push_back(I);
-
+
// Do a quick scan over the function. If we find any blocks that are
// unreachable, remove any instructions inside of them. This prevents
// the instcombine code from having to deal with some bad special cases.
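For context on the cast folds touched in the hunks above (illustrative only, not LLVM code; the standalone check below is invented for this note): those hunks rest on the fact that when every bit of X except the low bit is known zero, "cast (X != 0) to int" is just X and "cast (X == 0) to int" is X ^ 1, which is exactly what MaskedValueIsZero with the mask ~1 establishes.

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t X = 0; X <= 1; ++X) {          // the only values with X & ~1u == 0
        assert((X & ~1u) == 0);
        assert(static_cast<uint32_t>(X != 0) == X);          // cast (X != 0) to int --> X
        assert(static_cast<uint32_t>(X == 0) == (X ^ 1u));   // cast (X == 0) to int --> X ^ 1
      }
      return 0;
    }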
Index: llvm/lib/Transforms/Scalar/Reassociate.cpp
diff -u llvm/lib/Transforms/Scalar/Reassociate.cpp:1.49 llvm/lib/Transforms/Scalar/Reassociate.cpp:1.50
--- llvm/lib/Transforms/Scalar/Reassociate.cpp:1.49 Mon May 9 22:39:25 2005
+++ llvm/lib/Transforms/Scalar/Reassociate.cpp Wed Jul 27 01:12:34 2005
@@ -121,7 +121,7 @@
unsigned &CachedRank = ValueRankMap[I];
if (CachedRank) return CachedRank; // Rank already known?
-
+
// If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that
// we can reassociate expressions for code motion! Since we do not recurse
// for PHI nodes, we cannot have infinite recursion here, because there
@@ -130,7 +130,7 @@
for (unsigned i = 0, e = I->getNumOperands();
i != e && Rank != MaxRank; ++i)
Rank = std::max(Rank, getRank(I->getOperand(i)));
-
+
// If this is a not or neg instruction, do not count it for rank. This
// assures us that X and ~X will have the same rank.
if (!I->getType()->isIntegral() ||
@@ -139,7 +139,7 @@
//DEBUG(std::cerr << "Calculated Rank[" << V->getName() << "] = "
//<< Rank << "\n");
-
+
return CachedRank = Rank;
}
@@ -176,7 +176,7 @@
void Reassociate::LinearizeExpr(BinaryOperator *I) {
BinaryOperator *LHS = cast<BinaryOperator>(I->getOperand(0));
BinaryOperator *RHS = cast<BinaryOperator>(I->getOperand(1));
- assert(isReassociableOp(LHS, I->getOpcode()) &&
+ assert(isReassociableOp(LHS, I->getOpcode()) &&
isReassociableOp(RHS, I->getOpcode()) &&
"Not an expression that needs linearization?");
@@ -190,7 +190,7 @@
I->setOperand(1, RHS->getOperand(0));
RHS->setOperand(0, LHS);
I->setOperand(0, RHS);
-
+
++NumLinear;
MadeChange = true;
DEBUG(std::cerr << "Linearized: " << *I);
@@ -363,7 +363,7 @@
// Everyone now refers to the add instruction.
Sub->replaceAllUsesWith(New);
Sub->eraseFromParent();
-
+
DEBUG(std::cerr << "Negated: " << *New);
return New;
}
@@ -536,7 +536,7 @@
//case Instruction::Mul:
}
- if (IterateOptimization)
+ if (IterateOptimization)
OptimizeExpression(Opcode, Ops);
}
@@ -590,13 +590,13 @@
// If this instruction is a commutative binary operator, process it.
if (!BI->isAssociative()) continue;
BinaryOperator *I = cast<BinaryOperator>(BI);
-
+
// If this is an interior node of a reassociable tree, ignore it until we
// get to the root of the tree, to avoid N^2 analysis.
if (I->hasOneUse() && isReassociableOp(I->use_back(), I->getOpcode()))
continue;
- // First, walk the expression tree, linearizing the tree, collecting
+ // First, walk the expression tree, linearizing the tree, collecting
std::vector<ValueEntry> Ops;
LinearizeExprTree(I, Ops);
@@ -619,7 +619,7 @@
// this is a multiply tree used only by an add, and the immediate is a -1.
// In this case we reassociate to put the negation on the outside so that we
// can fold the negation into the add: (-X)*Y + Z -> Z-X*Y
- if (I->getOpcode() == Instruction::Mul && I->hasOneUse() &&
+ if (I->getOpcode() == Instruction::Mul && I->hasOneUse() &&
cast<Instruction>(I->use_back())->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Ops.back().Op) &&
cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) {
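The last Reassociate hunk refers to pulling a negation out of a multiply so the surrounding add can absorb it, (-X)*Y + Z -> Z - X*Y. A small numeric spot-check of that identity (illustrative only, not part of the pass):

    #include <cassert>

    int main() {
      // Exhaustive check over a small range; the identity holds for all integers,
      // modulo the usual two's complement wraparound.
      for (int X = -4; X <= 4; ++X)
        for (int Y = -4; Y <= 4; ++Y)
          for (int Z = -4; Z <= 4; ++Z)
            assert((-X) * Y + Z == Z - X * Y);
      return 0;
    }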
Index: llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
diff -u llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp:1.17 llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp:1.18
--- llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp:1.17 Mon May 9 18:51:13 2005
+++ llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp Wed Jul 27 01:12:34 2005
@@ -117,7 +117,7 @@
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
if (!FunctionContainsEscapingAllocas)
FunctionContainsEscapingAllocas = CheckForEscapingAllocas(BB);
-
+
if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator()))
MadeChange |= ProcessReturningBlock(Ret, OldEntry, ArgumentPHIs);
}
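For readers unfamiliar with the pass being cleaned up here: TailRecursionElimination rewrites a self-call in tail position into a branch back to the start of the function, with the call's arguments carried in PHI nodes (the ArgumentPHIs above). A rough source-level analogue of the rewrite, with invented function names and no LLVM API:

    #include <cassert>

    // Tail-recursive form: the recursive call is the last thing the function does.
    static int sumTo(int N, int Acc) {
      if (N == 0) return Acc;
      return sumTo(N - 1, Acc + N);
    }

    // Looped form after the rewrite: the arguments become loop-carried values.
    static int sumToLoop(int N, int Acc) {
      for (;;) {
        if (N == 0) return Acc;
        Acc = Acc + N;
        N = N - 1;
      }
    }

    int main() {
      for (int N = 0; N <= 10; ++N)
        assert(sumTo(N, 0) == sumToLoop(N, 0));
      return 0;
    }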