[llvm] r352771 - [CallSite removal] Remove CallSite uses from InstCombine.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 31 09:23:29 PST 2019
Author: ctopper
Date: Thu Jan 31 09:23:29 2019
New Revision: 352771
URL: http://llvm.org/viewvc/llvm-project?rev=352771&view=rev
Log:
[CallSite removal] Remove CallSite uses from InstCombine.
Reviewers: chandlerc
Reviewed By: chandlerc
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D57494
Modified:
llvm/trunk/include/llvm/IR/InstrTypes.h
llvm/trunk/lib/IR/Instructions.cpp
llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
Modified: llvm/trunk/include/llvm/IR/InstrTypes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/InstrTypes.h?rev=352771&r1=352770&r2=352771&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/InstrTypes.h (original)
+++ llvm/trunk/include/llvm/IR/InstrTypes.h Thu Jan 31 09:23:29 2019
@@ -1095,6 +1095,19 @@ public:
return isDataOperand(&UI.getUse());
}
+ /// Given a value use iterator, return the data operand number corresponding
+ /// to it. The iterator must actually correspond to a data operand.
+ unsigned getDataOperandNo(Value::const_user_iterator UI) const {
+ return getDataOperandNo(&UI.getUse());
+ }
+
+ /// Given a use for a data operand, get the data operand number that
+ /// corresponds to it.
+ unsigned getDataOperandNo(const Use *U) const {
+ assert(isDataOperand(U) && "Data operand # out of range!");
+ return U - data_operands_begin();
+ }
+
/// Return the iterator pointing to the beginning of the argument list.
User::op_iterator arg_begin() { return op_begin(); }
User::const_op_iterator arg_begin() const {
@@ -1198,6 +1211,13 @@ public:
return const_cast<CallBase *>(this)->getCaller();
}
+ /// Tests if this call site must be tail call optimized. Only a CallInst can
+ /// be tail call optimized.
+ bool isMustTailCall() const;
+
+ /// Tests if this call site is marked as a tail call.
+ bool isTailCall() const;
+
/// Returns the intrinsic ID of the intrinsic called or
/// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
/// this is an indirect call.
Modified: llvm/trunk/lib/IR/Instructions.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/Instructions.cpp?rev=352771&r1=352770&r2=352771&view=diff
==============================================================================
--- llvm/trunk/lib/IR/Instructions.cpp (original)
+++ llvm/trunk/lib/IR/Instructions.cpp Thu Jan 31 09:23:29 2019
@@ -266,6 +266,21 @@ bool CallBase::isIndirectCall() const {
return true;
}
+/// Tests if this call site must be tail call optimized. Only a CallInst can
+/// be tail call optimized.
+bool CallBase::isMustTailCall() const {
+ if (auto *CI = dyn_cast<CallInst>(this))
+ return CI->isMustTailCall();
+ return false;
+}
+
+/// Tests if this call site is marked as a tail call.
+bool CallBase::isTailCall() const {
+ if (auto *CI = dyn_cast<CallInst>(this))
+ return CI->isTailCall();
+ return false;
+}
+
Intrinsic::ID CallBase::getIntrinsicID() const {
if (auto *F = getCalledFunction())
return F->getIntrinsicID();
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp?rev=352771&r1=352770&r2=352771&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp Thu Jan 31 09:23:29 2019
@@ -27,7 +27,6 @@
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -1788,7 +1787,7 @@ static Instruction *canonicalizeConstant
}
/// CallInst simplification. This mostly only handles folding of intrinsic
-/// instructions. For normal calls, it allows visitCallSite to do the heavy
+/// instructions. For normal calls, it allows visitCallBase to do the heavy
/// lifting.
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (Value *V = SimplifyCall(&CI, SQ.getWithInstruction(&CI)))
@@ -1805,10 +1804,10 @@ Instruction *InstCombiner::visitCallInst
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
- if (!II) return visitCallSite(&CI);
+ if (!II) return visitCallBase(CI);
// Intrinsics cannot occur in an invoke, so handle them here instead of in
- // visitCallSite.
+ // visitCallBase.
if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
bool Changed = false;
@@ -3962,7 +3961,7 @@ Instruction *InstCombiner::visitCallInst
break;
}
}
- return visitCallSite(II);
+ return visitCallBase(*II);
}
// Fence instruction simplification
@@ -3977,12 +3976,12 @@ Instruction *InstCombiner::visitFenceIns
// InvokeInst simplification
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
- return visitCallSite(&II);
+ return visitCallBase(II);
}
/// If this cast does not affect the value passed through the varargs area, we
/// can eliminate the use of the cast.
-static bool isSafeToEliminateVarargsCast(const CallSite CS,
+static bool isSafeToEliminateVarargsCast(const CallBase &Call,
const DataLayout &DL,
const CastInst *const CI,
const int ix) {
@@ -3994,13 +3993,13 @@ static bool isSafeToEliminateVarargsCast
// TODO: This is probably something which should be expanded to all
// intrinsics since the entire point of intrinsics is that
// they are understandable by the optimizer.
- if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
+ if (isStatepoint(&Call) || isGCRelocate(&Call) || isGCResult(&Call))
return false;
// The size of ByVal or InAlloca arguments is derived from the type, so we
// can't change to a type with a different size. If the size were
// passed explicitly we could avoid this check.
- if (!CS.isByValOrInAllocaArgument(ix))
+ if (!Call.isByValOrInAllocaArgument(ix))
return true;
Type* SrcTy =
@@ -4109,9 +4108,9 @@ static IntrinsicInst *findInitTrampoline
}
/// Improvements for call and invoke instructions.
-Instruction *InstCombiner::visitCallSite(CallSite CS) {
- if (isAllocLikeFn(CS.getInstruction(), &TLI))
- return visitAllocSite(*CS.getInstruction());
+Instruction *InstCombiner::visitCallBase(CallBase &Call) {
+ if (isAllocLikeFn(&Call, &TLI))
+ return visitAllocSite(Call);
bool Changed = false;
@@ -4121,49 +4120,49 @@ Instruction *InstCombiner::visitCallSite
SmallVector<unsigned, 4> ArgNos;
unsigned ArgNo = 0;
- for (Value *V : CS.args()) {
+ for (Value *V : Call.args()) {
if (V->getType()->isPointerTy() &&
- !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
- isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT))
+ !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
+ isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
ArgNos.push_back(ArgNo);
ArgNo++;
}
- assert(ArgNo == CS.arg_size() && "sanity check");
+ assert(ArgNo == Call.arg_size() && "sanity check");
if (!ArgNos.empty()) {
- AttributeList AS = CS.getAttributes();
- LLVMContext &Ctx = CS.getInstruction()->getContext();
+ AttributeList AS = Call.getAttributes();
+ LLVMContext &Ctx = Call.getContext();
AS = AS.addParamAttribute(Ctx, ArgNos,
Attribute::get(Ctx, Attribute::NonNull));
- CS.setAttributes(AS);
+ Call.setAttributes(AS);
Changed = true;
}
// If the callee is a pointer to a function, attempt to move any casts to the
// arguments of the call/invoke.
- Value *Callee = CS.getCalledValue();
- if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
+ Value *Callee = Call.getCalledValue();
+ if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
return nullptr;
if (Function *CalleeF = dyn_cast<Function>(Callee)) {
// Remove the convergent attr on calls when the callee is not convergent.
- if (CS.isConvergent() && !CalleeF->isConvergent() &&
+ if (Call.isConvergent() && !CalleeF->isConvergent() &&
!CalleeF->isIntrinsic()) {
- LLVM_DEBUG(dbgs() << "Removing convergent attr from instr "
- << CS.getInstruction() << "\n");
- CS.setNotConvergent();
- return CS.getInstruction();
+ LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
+ << "\n");
+ Call.setNotConvergent();
+ return &Call;
}
// If the call and callee calling conventions don't match, this call must
// be unreachable, as the call is undefined.
- if (CalleeF->getCallingConv() != CS.getCallingConv() &&
+ if (CalleeF->getCallingConv() != Call.getCallingConv() &&
// Only do this for calls to a function with a body. A prototype may
// not actually end up matching the implementation's calling conv for a
// variety of reasons (e.g. it may be written in assembly).
!CalleeF->isDeclaration()) {
- Instruction *OldCall = CS.getInstruction();
+ Instruction *OldCall = &Call;
new StoreInst(ConstantInt::getTrue(Callee->getContext()),
UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
OldCall);
@@ -4183,15 +4182,14 @@ Instruction *InstCombiner::visitCallSite
}
if ((isa<ConstantPointerNull>(Callee) &&
- !NullPointerIsDefined(CS.getInstruction()->getFunction())) ||
+ !NullPointerIsDefined(Call.getFunction())) ||
isa<UndefValue>(Callee)) {
- // If CS does not return void then replaceAllUsesWith undef.
+ // If Call does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust itself.
- if (!CS.getInstruction()->getType()->isVoidTy())
- replaceInstUsesWith(*CS.getInstruction(),
- UndefValue::get(CS.getInstruction()->getType()));
+ if (!Call.getType()->isVoidTy())
+ replaceInstUsesWith(Call, UndefValue::get(Call.getType()));
- if (isa<InvokeInst>(CS.getInstruction())) {
+ if (isa<InvokeInst>(Call)) {
// Can't remove an invoke because we cannot change the CFG.
return nullptr;
}
@@ -4201,13 +4199,13 @@ Instruction *InstCombiner::visitCallSite
// that we can't modify the CFG here.
new StoreInst(ConstantInt::getTrue(Callee->getContext()),
UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
- CS.getInstruction());
+ &Call);
- return eraseInstFromFunction(*CS.getInstruction());
+ return eraseInstFromFunction(Call);
}
if (IntrinsicInst *II = findInitTrampoline(Callee))
- return transformCallThroughTrampoline(CS, II);
+ return transformCallThroughTrampoline(Call, *II);
PointerType *PTy = cast<PointerType>(Callee->getType());
FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
@@ -4215,39 +4213,39 @@ Instruction *InstCombiner::visitCallSite
int ix = FTy->getNumParams();
// See if we can optimize any arguments passed through the varargs area of
// the call.
- for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
- E = CS.arg_end(); I != E; ++I, ++ix) {
+ for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
+ I != E; ++I, ++ix) {
CastInst *CI = dyn_cast<CastInst>(*I);
- if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
+ if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
*I = CI->getOperand(0);
Changed = true;
}
}
}
- if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
+ if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
// Inline asm calls cannot throw - mark them 'nounwind'.
- CS.setDoesNotThrow();
+ Call.setDoesNotThrow();
Changed = true;
}
// Try to optimize the call if possible, we require DataLayout for most of
// this. None of these calls are seen as possibly dead so go ahead and
// delete the instruction now.
- if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
+ if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
Instruction *I = tryOptimizeCall(CI);
// If we changed something return the result, etc. Otherwise let
// the fallthrough check.
if (I) return eraseInstFromFunction(*I);
}
- return Changed ? CS.getInstruction() : nullptr;
+ return Changed ? &Call : nullptr;
}
/// If the callee is a constexpr cast of a function, attempt to move the cast to
/// the arguments of the call/invoke.
-bool InstCombiner::transformConstExprCastCall(CallSite CS) {
- auto *Callee = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
+ auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
if (!Callee)
return false;
@@ -4261,11 +4259,11 @@ bool InstCombiner::transformConstExprCas
// prototype with the exception of pointee types. The code below doesn't
// implement that, so we can't do this transform.
// TODO: Do the transform if it only requires adding pointer casts.
- if (CS.isMustTailCall())
+ if (Call.isMustTailCall())
return false;
- Instruction *Caller = CS.getInstruction();
- const AttributeList &CallerPAL = CS.getAttributes();
+ Instruction *Caller = &Call;
+ const AttributeList &CallerPAL = Call.getAttributes();
// Okay, this is a cast from a function to a different type. Unless doing so
// would cause a type conversion of one of our arguments, change this call to
@@ -4296,7 +4294,7 @@ bool InstCombiner::transformConstExprCas
return false; // Attribute not compatible with transformed value.
}
- // If the callsite is an invoke instruction, and the return value is used by
+ // If the call base is an invoke instruction, and the return value is used by
// a PHI node in a successor, we cannot change the return type of the call
// because there is no place to put the cast instruction (without breaking
// the critical edge). Bail out in this case.
@@ -4309,7 +4307,7 @@ bool InstCombiner::transformConstExprCas
return false;
}
- unsigned NumActualArgs = CS.arg_size();
+ unsigned NumActualArgs = Call.arg_size();
unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
// Prevent us turning:
@@ -4324,7 +4322,7 @@ bool InstCombiner::transformConstExprCas
Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
return false;
- CallSite::arg_iterator AI = CS.arg_begin();
+ auto AI = Call.arg_begin();
for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
Type *ParamTy = FT->getParamType(i);
Type *ActTy = (*AI)->getType();
@@ -4336,7 +4334,7 @@ bool InstCombiner::transformConstExprCas
.overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
return false; // Attribute not compatible with transformed value.
- if (CS.isInAllocaArgument(i))
+ if (Call.isInAllocaArgument(i))
return false; // Cannot transform to and from inalloca.
// If the parameter is passed as a byval argument, then we have to have a
@@ -4361,7 +4359,7 @@ bool InstCombiner::transformConstExprCas
// If the callee is just a declaration, don't change the varargsness of the
// call. We don't want to introduce a varargs call where one doesn't
// already exist.
- PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
+ PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
return false;
@@ -4400,7 +4398,7 @@ bool InstCombiner::transformConstExprCas
// with the existing attributes. Wipe out any problematic attributes.
RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
- AI = CS.arg_begin();
+ AI = Call.arg_begin();
for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
Type *ParamTy = FT->getParamType(i);
@@ -4454,29 +4452,29 @@ bool InstCombiner::transformConstExprCas
Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
SmallVector<OperandBundleDef, 1> OpBundles;
- CS.getOperandBundlesAsDefs(OpBundles);
+ Call.getOperandBundlesAsDefs(OpBundles);
- CallSite NewCS;
+ CallBase *NewCall;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
- NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(),
- II->getUnwindDest(), Args, OpBundles);
+ NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
+ II->getUnwindDest(), Args, OpBundles);
} else {
- NewCS = Builder.CreateCall(Callee, Args, OpBundles);
- cast<CallInst>(NewCS.getInstruction())
- ->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind());
- }
- NewCS->takeName(Caller);
- NewCS.setCallingConv(CS.getCallingConv());
- NewCS.setAttributes(NewCallerPAL);
+ NewCall = Builder.CreateCall(Callee, Args, OpBundles);
+ cast<CallInst>(NewCall)->setTailCallKind(
+ cast<CallInst>(Caller)->getTailCallKind());
+ }
+ NewCall->takeName(Caller);
+ NewCall->setCallingConv(Call.getCallingConv());
+ NewCall->setAttributes(NewCallerPAL);
// Preserve the weight metadata for the new call instruction. The metadata
// is used by SamplePGO to check callsite's hotness.
uint64_t W;
if (Caller->extractProfTotalWeight(W))
- NewCS->setProfWeight(W);
+ NewCall->setProfWeight(W);
// Insert a cast of the return type as necessary.
- Instruction *NC = NewCS.getInstruction();
+ Instruction *NC = NewCall;
Value *NV = NC;
if (OldRetTy != NV->getType() && !Caller->use_empty()) {
if (!NV->getType()->isVoidTy()) {
@@ -4516,22 +4514,19 @@ bool InstCombiner::transformConstExprCas
/// Turn a call to a function created by init_trampoline / adjust_trampoline
/// intrinsic pair into a direct call to the underlying function.
Instruction *
-InstCombiner::transformCallThroughTrampoline(CallSite CS,
- IntrinsicInst *Tramp) {
- Value *Callee = CS.getCalledValue();
+InstCombiner::transformCallThroughTrampoline(CallBase &Call,
+ IntrinsicInst &Tramp) {
+ Value *Callee = Call.getCalledValue();
PointerType *PTy = cast<PointerType>(Callee->getType());
FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
- AttributeList Attrs = CS.getAttributes();
+ AttributeList Attrs = Call.getAttributes();
// If the call already has the 'nest' attribute somewhere then give up -
// otherwise 'nest' would occur twice after splicing in the chain.
if (Attrs.hasAttrSomewhere(Attribute::Nest))
return nullptr;
- assert(Tramp &&
- "transformCallThroughTrampoline called with incorrect CallSite.");
-
- Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
+ Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
FunctionType *NestFTy = cast<FunctionType>(NestF->getValueType());
AttributeList NestAttrs = NestF->getAttributes();
@@ -4554,22 +4549,21 @@ InstCombiner::transformCallThroughTrampo
}
if (NestTy) {
- Instruction *Caller = CS.getInstruction();
std::vector<Value*> NewArgs;
std::vector<AttributeSet> NewArgAttrs;
- NewArgs.reserve(CS.arg_size() + 1);
- NewArgAttrs.reserve(CS.arg_size());
+ NewArgs.reserve(Call.arg_size() + 1);
+ NewArgAttrs.reserve(Call.arg_size());
// Insert the nest argument into the call argument list, which may
// mean appending it. Likewise for attributes.
{
unsigned ArgNo = 0;
- CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
+ auto I = Call.arg_begin(), E = Call.arg_end();
do {
if (ArgNo == NestArgNo) {
// Add the chain argument and attributes.
- Value *NestVal = Tramp->getArgOperand(2);
+ Value *NestVal = Tramp.getArgOperand(2);
if (NestVal->getType() != NestTy)
NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
NewArgs.push_back(NestVal);
@@ -4631,10 +4625,10 @@ InstCombiner::transformCallThroughTrampo
Attrs.getRetAttributes(), NewArgAttrs);
SmallVector<OperandBundleDef, 1> OpBundles;
- CS.getOperandBundlesAsDefs(OpBundles);
+ Call.getOperandBundlesAsDefs(OpBundles);
Instruction *NewCaller;
- if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
+ if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
NewCaller = InvokeInst::Create(NewCallee,
II->getNormalDest(), II->getUnwindDest(),
NewArgs, OpBundles);
@@ -4643,12 +4637,12 @@ InstCombiner::transformCallThroughTrampo
} else {
NewCaller = CallInst::Create(NewCallee, NewArgs, OpBundles);
cast<CallInst>(NewCaller)->setTailCallKind(
- cast<CallInst>(Caller)->getTailCallKind());
+ cast<CallInst>(Call).getTailCallKind());
cast<CallInst>(NewCaller)->setCallingConv(
- cast<CallInst>(Caller)->getCallingConv());
+ cast<CallInst>(Call).getCallingConv());
cast<CallInst>(NewCaller)->setAttributes(NewPAL);
}
- NewCaller->setDebugLoc(Caller->getDebugLoc());
+ NewCaller->setDebugLoc(Call.getDebugLoc());
return NewCaller;
}
@@ -4660,6 +4654,6 @@ InstCombiner::transformCallThroughTrampo
Constant *NewCallee =
NestF->getType() == PTy ? NestF :
ConstantExpr::getBitCast(NestF, PTy);
- CS.setCalledFunction(NewCallee);
- return CS.getInstruction();
+ Call.setCalledFunction(NewCallee);
+ return &Call;
}
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h?rev=352771&r1=352770&r2=352771&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h Thu Jan 31 09:23:29 2019
@@ -52,7 +52,6 @@ namespace llvm {
class APInt;
class AssumptionCache;
-class CallSite;
class DataLayout;
class DominatorTree;
class GEPOperator;
@@ -467,11 +466,11 @@ private:
Instruction &CtxI, Value *&OperationResult,
Constant *&OverflowResult);
- Instruction *visitCallSite(CallSite CS);
+ Instruction *visitCallBase(CallBase &Call);
Instruction *tryOptimizeCall(CallInst *CI);
- bool transformConstExprCastCall(CallSite CS);
- Instruction *transformCallThroughTrampoline(CallSite CS,
- IntrinsicInst *Tramp);
+ bool transformConstExprCastCall(CallBase &Call);
+ Instruction *transformCallThroughTrampoline(CallBase &Call,
+ IntrinsicInst &Tramp);
/// Transform (zext icmp) to bitwise / integer operations in order to
/// eliminate it.
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp?rev=352771&r1=352770&r2=352771&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp Thu Jan 31 09:23:29 2019
@@ -88,29 +88,29 @@ isOnlyCopiedFromConstantGlobal(Value *V,
continue;
}
- if (auto CS = CallSite(I)) {
+ if (auto *Call = dyn_cast<CallBase>(I)) {
// If this is the function being called then we treat it like a load and
// ignore it.
- if (CS.isCallee(&U))
+ if (Call->isCallee(&U))
continue;
- unsigned DataOpNo = CS.getDataOperandNo(&U);
- bool IsArgOperand = CS.isArgOperand(&U);
+ unsigned DataOpNo = Call->getDataOperandNo(&U);
+ bool IsArgOperand = Call->isArgOperand(&U);
// Inalloca arguments are clobbered by the call.
- if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
+ if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
return false;
// If this is a readonly/readnone call site, then we know it is just a
// load (but one that potentially returns the value itself), so we can
// ignore it if we know that the value isn't captured.
- if (CS.onlyReadsMemory() &&
- (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
+ if (Call->onlyReadsMemory() &&
+ (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
continue;
// If this is being passed as a byval argument, the caller is making a
// copy, so it is only a read of the alloca.
- if (IsArgOperand && CS.isByValArgument(DataOpNo))
+ if (IsArgOperand && Call->isByValArgument(DataOpNo))
continue;
}
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp?rev=352771&r1=352770&r2=352771&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp Thu Jan 31 09:23:29 2019
@@ -1180,7 +1180,8 @@ Instruction *InstCombiner::visitFDiv(Bin
IRBuilder<> B(&I);
IRBuilder<>::FastMathFlagGuard FMFGuard(B);
B.setFastMathFlags(I.getFastMathFlags());
- AttributeList Attrs = CallSite(Op0).getCalledFunction()->getAttributes();
+ AttributeList Attrs =
+ cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
LibFunc_tanl, B, Attrs);
if (IsCot)
More information about the llvm-commits
mailing list