[llvm-commits] [llvm] r92471 - in /llvm/trunk/lib/Transforms/InstCombine: InstCombineCasts.cpp InstructionCombining.cpp
Chris Lattner
sabre at nondot.org
Sun Jan 3 23:59:07 PST 2010
Author: lattner
Date: Mon Jan 4 01:59:07 2010
New Revision: 92471
URL: http://llvm.org/viewvc/llvm-project?rev=92471&view=rev
Log:
move some more cast-related stuff
Modified:
llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp
llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp?rev=92471&r1=92470&r2=92471&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCasts.cpp Mon Jan 4 01:59:07 2010
@@ -17,6 +17,131 @@
using namespace llvm;
using namespace PatternMatch;
+/// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
+/// expression. If so, decompose it, returning some value X, such that Val is
+/// X*Scale+Offset.
+///
+static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
+ int &Offset) {
+ assert(Val->getType() == Type::getInt32Ty(Val->getContext()) &&
+ "Unexpected allocation size type!");
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
+ Offset = CI->getZExtValue();
+ Scale = 0;
+ return ConstantInt::get(Type::getInt32Ty(Val->getContext()), 0);
+ } else if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
+ if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ if (I->getOpcode() == Instruction::Shl) {
+ // This is a value scaled by '1 << the shift amt'.
+ Scale = 1U << RHS->getZExtValue();
+ Offset = 0;
+ return I->getOperand(0);
+ } else if (I->getOpcode() == Instruction::Mul) {
+ // This value is scaled by 'RHS'.
+ Scale = RHS->getZExtValue();
+ Offset = 0;
+ return I->getOperand(0);
+ } else if (I->getOpcode() == Instruction::Add) {
+ // We have X+C. Check to see if we really have (X*C2)+C1,
+ // where C1 is divisible by C2.
+ unsigned SubScale;
+ Value *SubVal =
+ DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
+ Offset += RHS->getZExtValue();
+ Scale = SubScale;
+ return SubVal;
+ }
+ }
+ }
+
+ // Otherwise, we can't look past this.
+ Scale = 1;
+ Offset = 0;
+ return Val;
+}
+
+/// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
+/// try to eliminate the cast by moving the type information into the alloc.
+Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
+ AllocaInst &AI) {
+ // This requires TargetData to get the alloca alignment and size information.
+ if (!TD) return 0;
+
+ const PointerType *PTy = cast<PointerType>(CI.getType());
+
+ BuilderTy AllocaBuilder(*Builder);
+ AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
+
+ // Get the type really allocated and the type casted to.
+ const Type *AllocElTy = AI.getAllocatedType();
+ const Type *CastElTy = PTy->getElementType();
+ if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
+
+ unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
+ unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
+ if (CastElTyAlign < AllocElTyAlign) return 0;
+
+ // If the allocation has multiple uses, only promote it if we are strictly
+ // increasing the alignment of the resultant allocation. If we keep it the
+ // same, we open the door to infinite loops of various kinds. (A reference
+ // from a dbg.declare doesn't count as a use for this purpose.)
+ if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
+ CastElTyAlign == AllocElTyAlign) return 0;
+
+ uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
+ uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
+ if (CastElTySize == 0 || AllocElTySize == 0) return 0;
+
+ // See if we can satisfy the modulus by pulling a scale out of the array
+ // size argument.
+ unsigned ArraySizeScale;
+ int ArrayOffset;
+ Value *NumElements = // See if the array size is a decomposable linear expr.
+ DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
+
+ // If we can now satisfy the modulus, by using a non-1 scale, we really can
+ // do the xform.
+ if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
+ (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
+
+ unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
+ Value *Amt = 0;
+ if (Scale == 1) {
+ Amt = NumElements;
+ } else {
+ Amt = ConstantInt::get(Type::getInt32Ty(CI.getContext()), Scale);
+ // Insert before the alloca, not before the cast.
+ Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
+ }
+
+ if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
+ Value *Off = ConstantInt::get(Type::getInt32Ty(CI.getContext()),
+ Offset, true);
+ Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
+ }
+
+ AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
+ New->setAlignment(AI.getAlignment());
+ New->takeName(&AI);
+
+ // If the allocation has one real use plus a dbg.declare, just remove the
+ // declare.
+ if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
+ EraseInstFromFunction(*(Instruction*)DI);
+ }
+ // If the allocation has multiple real uses, insert a cast and change all
+ // things that used it to use the new cast. This will also hack on CI, but it
+ // will die soon.
+ else if (!AI.hasOneUse()) {
+ // New is the allocation instruction, pointer typed. AI is the original
+ // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
+ Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
+ AI.replaceAllUsesWith(NewCast);
+ }
+ return ReplaceInstUsesWith(CI, New);
+}
+
+
/// CanEvaluateInDifferentType - Return true if we can take the specified value
/// and return it as type Ty without inserting any new casts and without
/// changing the computed value. This is used by code that tries to decide
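
For context on the code being moved above: DecomposeSimpleLinearExpr rewrites the alloca's i32 array-size operand as X*Scale+Offset (looking through shl, mul, and add by a constant), and PromoteCastOfAllocation only rewrites the allocation when both the scaled part and the constant part of the byte size divide evenly by the cast-to element size. The following standalone C++ sketch mirrors just that arithmetic; the helper name, the LinearExpr struct, and the example sizes (a 1-byte element type cast to a 4-byte element type) are hypothetical illustrations for readers, not code from the tree or part of this commit.

#include <cstdint>
#include <iostream>

// The array-size operand after decomposition: element count == X*Scale + Offset.
struct LinearExpr {
  unsigned Scale;  // multiplier applied to the unknown X
  int Offset;      // constant addend
};

// Hypothetical sketch (not LLVM code): given an allocation of AllocElTySize-byte
// elements whose count decomposes as X*Scale + Offset, decide whether it can be
// re-expressed as an allocation of CastElTySize-byte elements, and if so compute
// the rescaled count X*NewScale + NewOffset. This mirrors the modulus checks and
// divisions in PromoteCastOfAllocation; it assumes Offset is non-negative.
static bool rescaleAllocSize(uint64_t AllocElTySize, uint64_t CastElTySize,
                             LinearExpr Old,
                             uint64_t &NewScale, int64_t &NewOffset) {
  if (AllocElTySize == 0 || CastElTySize == 0)
    return false;
  // Both the scaled part and the constant part must remain whole element counts
  // of the cast-to type, otherwise the transform is not performed.
  if ((AllocElTySize * Old.Scale) % CastElTySize != 0 ||
      (AllocElTySize * Old.Offset) % CastElTySize != 0)
    return false;
  NewScale  = (AllocElTySize * Old.Scale) / CastElTySize;
  NewOffset = (int64_t)(AllocElTySize * Old.Offset) / (int64_t)CastElTySize;
  return true;
}

int main() {
  uint64_t NewScale;
  int64_t NewOffset;
  // Example with hypothetical sizes: an i8 allocation whose element count is
  // 12*x + 8, bitcast to a pointer to 4-byte elements, can be rewritten as an
  // allocation of 3*x + 2 elements of the cast-to type.
  if (rescaleAllocSize(/*AllocElTySize=*/1, /*CastElTySize=*/4,
                       LinearExpr{12, 8}, NewScale, NewOffset))
    std::cout << "new element count = " << NewScale << "*x + " << NewOffset << '\n';
  return 0;
}
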
Modified: llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp?rev=92471&r1=92470&r2=92471&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp Mon Jan 4 01:59:07 2010
@@ -4359,146 +4359,6 @@
}
-/// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
-/// expression. If so, decompose it, returning some value X, such that Val is
-/// X*Scale+Offset.
-///
-static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
- int &Offset) {
- assert(Val->getType() == Type::getInt32Ty(Val->getContext()) &&
- "Unexpected allocation size type!");
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
- Offset = CI->getZExtValue();
- Scale = 0;
- return ConstantInt::get(Type::getInt32Ty(Val->getContext()), 0);
- } else if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
- if (I->getOpcode() == Instruction::Shl) {
- // This is a value scaled by '1 << the shift amt'.
- Scale = 1U << RHS->getZExtValue();
- Offset = 0;
- return I->getOperand(0);
- } else if (I->getOpcode() == Instruction::Mul) {
- // This value is scaled by 'RHS'.
- Scale = RHS->getZExtValue();
- Offset = 0;
- return I->getOperand(0);
- } else if (I->getOpcode() == Instruction::Add) {
- // We have X+C. Check to see if we really have (X*C2)+C1,
- // where C1 is divisible by C2.
- unsigned SubScale;
- Value *SubVal =
- DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
- Offset += RHS->getZExtValue();
- Scale = SubScale;
- return SubVal;
- }
- }
- }
-
- // Otherwise, we can't look past this.
- Scale = 1;
- Offset = 0;
- return Val;
-}
-
-
-/// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
-/// try to eliminate the cast by moving the type information into the alloc.
-Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
- AllocaInst &AI) {
- const PointerType *PTy = cast<PointerType>(CI.getType());
-
- BuilderTy AllocaBuilder(*Builder);
- AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
-
- // Remove any uses of AI that are dead.
- assert(!CI.use_empty() && "Dead instructions should be removed earlier!");
-
- for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
- Instruction *User = cast<Instruction>(*UI++);
- if (isInstructionTriviallyDead(User)) {
- while (UI != E && *UI == User)
- ++UI; // If this instruction uses AI more than once, don't break UI.
-
- ++NumDeadInst;
- DEBUG(errs() << "IC: DCE: " << *User << '\n');
- EraseInstFromFunction(*User);
- }
- }
-
- // This requires TargetData to get the alloca alignment and size information.
- if (!TD) return 0;
-
- // Get the type really allocated and the type casted to.
- const Type *AllocElTy = AI.getAllocatedType();
- const Type *CastElTy = PTy->getElementType();
- if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
-
- unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
- unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
- if (CastElTyAlign < AllocElTyAlign) return 0;
-
- // If the allocation has multiple uses, only promote it if we are strictly
- // increasing the alignment of the resultant allocation. If we keep it the
- // same, we open the door to infinite loops of various kinds. (A reference
- // from a dbg.declare doesn't count as a use for this purpose.)
- if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
- CastElTyAlign == AllocElTyAlign) return 0;
-
- uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
- uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
- if (CastElTySize == 0 || AllocElTySize == 0) return 0;
-
- // See if we can satisfy the modulus by pulling a scale out of the array
- // size argument.
- unsigned ArraySizeScale;
- int ArrayOffset;
- Value *NumElements = // See if the array size is a decomposable linear expr.
- DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
-
- // If we can now satisfy the modulus, by using a non-1 scale, we really can
- // do the xform.
- if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
- (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
-
- unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
- Value *Amt = 0;
- if (Scale == 1) {
- Amt = NumElements;
- } else {
- Amt = ConstantInt::get(Type::getInt32Ty(CI.getContext()), Scale);
- // Insert before the alloca, not before the cast.
- Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
- }
-
- if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
- Value *Off = ConstantInt::get(Type::getInt32Ty(CI.getContext()),
- Offset, true);
- Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
- }
-
- AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
- New->setAlignment(AI.getAlignment());
- New->takeName(&AI);
-
- // If the allocation has one real use plus a dbg.declare, just remove the
- // declare.
- if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
- EraseInstFromFunction(*DI);
- }
- // If the allocation has multiple real uses, insert a cast and change all
- // things that used it to use the new cast. This will also hack on CI, but it
- // will die soon.
- else if (!AI.hasOneUse()) {
- // New is the allocation instruction, pointer typed. AI is the original
- // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
- Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
- AI.replaceAllUsesWith(NewCast);
- }
- return ReplaceInstUsesWith(CI, New);
-}
-
/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us at