[llvm-commits] [llvm] r101801 - in /llvm/trunk/lib/CodeGen/SelectionDAG: FunctionLoweringInfo.cpp FunctionLoweringInfo.h SelectionDAGBuilder.cpp
Dan Gohman
gohman at apple.com
Mon Apr 19 11:41:46 PDT 2010
Author: djg
Date: Mon Apr 19 13:41:46 2010
New Revision: 101801
URL: http://llvm.org/viewvc/llvm-project?rev=101801&view=rev
Log:
Move isInTailCallPosition out of SelectionDAGBuilder, as it isn't
SelectionDAG-specific.
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp?rev=101801&r1=101800&r2=101801&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp Mon Apr 19 13:41:46 2010
@@ -448,3 +448,88 @@
return ISD::SETNE;
}
}
+
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
+ const TargetLowering &TLI) {
+ const Instruction *I = CS.getInstruction();
+ const BasicBlock *ExitBB = I->getParent();
+ const TerminatorInst *Term = ExitBB->getTerminator();
+ const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
+ const Function *F = ExitBB->getParent();
+
+ // The block must end in a return statement or unreachable.
+ //
+ // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
+ // an unreachable, for now. The way tailcall optimization is currently
+ // implemented means it will add an epilogue followed by a jump. That is
+ // not profitable. Also, if the callee is a special function (e.g.
+ // longjmp on x86), it can end up causing miscompilation that has not
+ // been fully understood.
+ if (!Ret &&
+ (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;
+
+ // If I will have a chain, make sure no other instruction that will have a
+ // chain interposes between I and the return.
+ if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
+ !I->isSafeToSpeculativelyExecute())
+ for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
+ --BBI) {
+ if (&*BBI == I)
+ break;
+ // Debug info intrinsics do not get in the way of tail call optimization.
+ if (isa<DbgInfoIntrinsic>(BBI))
+ continue;
+ if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
+ !BBI->isSafeToSpeculativelyExecute())
+ return false;
+ }
+
+ // If the block ends with a void return or unreachable, it doesn't matter
+ // what the call's return type is.
+ if (!Ret || Ret->getNumOperands() == 0) return true;
+
+ // If the return value is undef, it doesn't matter what the call's
+ // return type is.
+ if (isa<UndefValue>(Ret->getOperand(0))) return true;
+
+ // Conservatively require the attributes of the call to match those of
+ // the return. Ignore noalias because it doesn't affect the call sequence.
+ unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
+ if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
+ return false;
+
+ // It's not safe to eliminate the sign / zero extension of the return value.
+ if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+ return false;
+
+ // Otherwise, make sure the unmodified return value of I is the return value.
+ for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
+ U = dyn_cast<Instruction>(U->getOperand(0))) {
+ if (!U)
+ return false;
+ if (!U->hasOneUse())
+ return false;
+ if (U == I)
+ break;
+ // Check for a truly no-op truncate.
+ if (isa<TruncInst>(U) &&
+ TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
+ continue;
+ // Check for a truly no-op bitcast.
+ if (isa<BitCastInst>(U) &&
+ (U->getOperand(0)->getType() == U->getType() ||
+ (U->getOperand(0)->getType()->isPointerTy() &&
+ U->getType()->isPointerTy())))
+ continue;
+ // Otherwise it's not a true no-op.
+ return false;
+ }
+
+ return true;
+}
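
For illustration, a minimal caller-side sketch, not part of this commit: how code outside SelectionDAGBuilder might gate tail-call emission on the helper now exported as llvm::isInTailCallPosition. The wrapper name mayLowerAsTailCall is made up, and the includes assume the LLVM 2.7-era tree layout, with this file sitting next to the private FunctionLoweringInfo.h header.

  #include "FunctionLoweringInfo.h"
  #include "llvm/Attributes.h"
  #include "llvm/Support/CallSite.h"
  #include "llvm/Target/TargetLowering.h"

  namespace {

  // Honor a front-end "tail" marker only when the target-independent
  // position requirements tested by isInTailCallPosition also hold.
  bool mayLowerAsTailCall(llvm::ImmutableCallSite CS,
                          llvm::Attributes CalleeRetAttr,
                          const llvm::TargetLowering &TLI,
                          bool RequestedTailCall) {
    return RequestedTailCall &&
           llvm::isInTailCallPosition(CS, CalleeRetAttr, TLI);
  }

  } // end anonymous namespace

The helper only answers the target-independent position question, which is what allows it to move out of SelectionDAGBuilder; any target-specific tail-call checks remain with the callers.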
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h?rev=101801&r1=101800&r2=101801&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h Mon Apr 19 13:41:46 2010
@@ -24,6 +24,7 @@
#endif
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/Support/CallSite.h"
#include <vector>
namespace llvm {
@@ -166,6 +167,15 @@
///
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+bool isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
+ const TargetLowering &TLI);
+
} // end namespace llvm
#endif
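
As a source-level illustration, not part of this commit and using made-up function names, the call in f1 below is in tail-call position because its unmodified result feeds the return, while the call in f2 is not because the add interposes between the call and the return:

  // Hypothetical C++ input; callee, f1, and f2 are illustrative names.
  extern int callee(int);

  int f1(int x) {
    // The call's result is returned unmodified, so this call site is in
    // tail-call position (subject to the attribute checks described above).
    return callee(x);
  }

  int f2(int x) {
    // The result is modified before the return, so this call site is not
    // in tail-call position.
    return callee(x) + 1;
  }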
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=101801&r1=101800&r2=101801&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Mon Apr 19 13:41:46 2010
@@ -4190,92 +4190,6 @@
}
}
-/// Test if the given instruction is in a position to be optimized
-/// with a tail-call. This roughly means that it's in a block with
-/// a return and there's nothing that needs to be scheduled
-/// between it and the return.
-///
-/// This function only tests target-independent requirements.
-static bool
-isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
- const TargetLowering &TLI) {
- const Instruction *I = CS.getInstruction();
- const BasicBlock *ExitBB = I->getParent();
- const TerminatorInst *Term = ExitBB->getTerminator();
- const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
- const Function *F = ExitBB->getParent();
-
- // The block must end in a return statement or unreachable.
- //
- // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
- // an unreachable, for now. The way tailcall optimization is currently
- // implemented means it will add an epilogue followed by a jump. That is
- // not profitable. Also, if the callee is a special function (e.g.
- // longjmp on x86), it can end up causing miscompilation that has not
- // been fully understood.
- if (!Ret &&
- (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;
-
- // If I will have a chain, make sure no other instruction that will have a
- // chain interposes between I and the return.
- if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
- !I->isSafeToSpeculativelyExecute())
- for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
- --BBI) {
- if (&*BBI == I)
- break;
- // Debug info intrinsics do not get in the way of tail call optimization.
- if (isa<DbgInfoIntrinsic>(BBI))
- continue;
- if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
- !BBI->isSafeToSpeculativelyExecute())
- return false;
- }
-
- // If the block ends with a void return or unreachable, it doesn't matter
- // what the call's return type is.
- if (!Ret || Ret->getNumOperands() == 0) return true;
-
- // If the return value is undef, it doesn't matter what the call's
- // return type is.
- if (isa<UndefValue>(Ret->getOperand(0))) return true;
-
- // Conservatively require the attributes of the call to match those of
- // the return. Ignore noalias because it doesn't affect the call sequence.
- unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
- if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
- return false;
-
- // It's not safe to eliminate the sign / zero extension of the return value.
- if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
- return false;
-
- // Otherwise, make sure the unmodified return value of I is the return value.
- for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
- U = dyn_cast<Instruction>(U->getOperand(0))) {
- if (!U)
- return false;
- if (!U->hasOneUse())
- return false;
- if (U == I)
- break;
- // Check for a truly no-op truncate.
- if (isa<TruncInst>(U) &&
- TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
- continue;
- // Check for a truly no-op bitcast.
- if (isa<BitCastInst>(U) &&
- (U->getOperand(0)->getType() == U->getType() ||
- (U->getOperand(0)->getType()->isPointerTy() &&
- U->getType()->isPointerTy())))
- continue;
- // Otherwise it's not a true no-op.
- return false;
- }
-
- return true;
-}
-
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
bool isTailCall,
MachineBasicBlock *LandingPad) {