[llvm] b91f78d - [CallSite removal][MemCpyOptimizer] Replace CallSite with CallBase. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 17 10:08:36 PDT 2020
Author: Craig Topper
Date: 2020-04-17T10:07:20-07:00
New Revision: b91f78db370bb8161472acd75a67916d033c3348
URL: https://github.com/llvm/llvm-project/commit/b91f78db370bb8161472acd75a67916d033c3348
DIFF: https://github.com/llvm/llvm-project/commit/b91f78db370bb8161472acd75a67916d033c3348.diff
LOG: [CallSite removal][MemCpyOptimizer] Replace CallSite with CallBase. NFC
There are also some adjustments here to use MaybeAlign,
since CallBase::getParamAlignment() is deprecated. It would
be cleaner if getOrEnforceKnownAlignment were migrated
to Align/MaybeAlign.
Differential Revision: https://reviews.llvm.org/D78345
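The mechanical pattern, repeated throughout the diff below, is to replace
the CallSite/ImmutableCallSite wrapper with a dyn_cast (or cast) to
CallBase and to rename the accessors, e.g. getArgumentNo() becomes
getArgOperandNo(). A minimal sketch of the idiom (illustrative only, not
code from this commit; the helper name and sentinel are hypothetical):

// Illustrative sketch of the CallSite -> CallBase migration pattern.
#include "llvm/IR/InstrTypes.h" // llvm::CallBase
#include "llvm/IR/Use.h"
#include "llvm/Support/Casting.h" // llvm::dyn_cast

using namespace llvm;

// Returns the argument index of U at its call site, or ~0u when U is not
// a call argument operand (e.g. it is the callee or a bundle operand).
static unsigned getCallArgNo(const Use &U) {
  // Before: ImmutableCallSite ICS(U.getUser());
  //         if (ICS && ICS.isArgOperand(&U))
  //           return ICS.getArgumentNo(&U);
  if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
    if (CB->isArgOperand(&U))
      return CB->getArgOperandNo(&U);
  return ~0u; // hypothetical sentinel for "not an argument use"
}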
Added:
Modified:
llvm/include/llvm/Transforms/IPO/Attributor.h
llvm/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
llvm/lib/Transforms/IPO/Attributor.cpp
llvm/lib/Transforms/IPO/AttributorAttributes.cpp
llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
index 2363a74d211b..99989775bde6 100644
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -217,23 +217,6 @@ struct IRPosition {
return IRPosition(const_cast<CallBase &>(CB), Kind(ArgNo));
}
- /// Create a position describing the function scope of \p ICS.
- static const IRPosition callsite_function(ImmutableCallSite ICS) {
- return IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction()));
- }
-
- /// Create a position describing the returned value of \p ICS.
- static const IRPosition callsite_returned(ImmutableCallSite ICS) {
- return IRPosition::callsite_returned(cast<CallBase>(*ICS.getInstruction()));
- }
-
- /// Create a position describing the argument of \p ICS at position \p ArgNo.
- static const IRPosition callsite_argument(ImmutableCallSite ICS,
- unsigned ArgNo) {
- return IRPosition::callsite_argument(cast<CallBase>(*ICS.getInstruction()),
- ArgNo);
- }
-
/// Create a position describing the argument of \p ACS at position \p ArgNo.
static const IRPosition callsite_argument(AbstractCallSite ACS,
unsigned ArgNo) {
@@ -418,9 +401,9 @@ struct IRPosition {
return;
AttributeList AttrList;
- CallSite CS = CallSite(&getAnchorValue());
- if (CS)
- AttrList = CS.getAttributes();
+ auto *CB = dyn_cast<CallBase>(&getAnchorValue());
+ if (CB)
+ AttrList = CB->getAttributes();
else
AttrList = getAssociatedFunction()->getAttributes();
@@ -428,8 +411,8 @@ struct IRPosition {
for (Attribute::AttrKind AK : AKs)
AttrList = AttrList.removeAttribute(Ctx, getAttrIdx(), AK);
- if (CS)
- CS.setAttributes(AttrList);
+ if (CB)
+ CB->setAttributes(AttrList);
else
getAssociatedFunction()->setAttributes(AttrList);
}
diff --git a/llvm/include/llvm/Transforms/Scalar/MemCpyOptimizer.h b/llvm/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
index 5386f58b2b82..41180c5c678d 100644
--- a/llvm/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
+++ b/llvm/include/llvm/Transforms/Scalar/MemCpyOptimizer.h
@@ -16,7 +16,6 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/PassManager.h"
#include <cstdint>
#include <functional>
@@ -66,7 +65,7 @@ class MemCpyOptPass : public PassInfoMixin<MemCpyOptPass> {
bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
- bool processByValArgument(CallSite CS, unsigned ArgNo);
+ bool processByValArgument(CallBase &CB, unsigned ArgNo);
Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
Value *ByteVal);
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index da1cbcc90be5..9304a923546a 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -228,7 +228,7 @@ IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
case IRPosition::IRP_CALL_SITE:
case IRPosition::IRP_CALL_SITE_RETURNED:
case IRPosition::IRP_CALL_SITE_ARGUMENT:
- Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
+ Attrs = cast<CallBase>(IRP.getAnchorValue()).getAttributes();
break;
}
@@ -253,7 +253,7 @@ IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
case IRPosition::IRP_CALL_SITE:
case IRPosition::IRP_CALL_SITE_RETURNED:
case IRPosition::IRP_CALL_SITE_ARGUMENT:
- CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
+ cast<CallBase>(IRP.getAnchorValue()).setAttributes(Attrs);
break;
case IRPosition::IRP_INVALID:
case IRPosition::IRP_FLOAT:
@@ -269,7 +269,7 @@ const IRPosition IRPosition::TombstoneKey(256);
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
IRPositions.emplace_back(IRP);
- ImmutableCallSite ICS(&IRP.getAnchorValue());
+ const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
switch (IRP.getPositionKind()) {
case IRPosition::IRP_INVALID:
case IRPosition::IRP_FLOAT:
@@ -280,41 +280,40 @@ SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
return;
case IRPosition::IRP_CALL_SITE:
- assert(ICS && "Expected call site!");
+ assert(CB && "Expected call site!");
// TODO: We need to look at the operand bundles similar to the redirection
// in CallBase.
- if (!ICS.hasOperandBundles())
- if (const Function *Callee = ICS.getCalledFunction())
+ if (!CB->hasOperandBundles())
+ if (const Function *Callee = CB->getCalledFunction())
IRPositions.emplace_back(IRPosition::function(*Callee));
return;
case IRPosition::IRP_CALL_SITE_RETURNED:
- assert(ICS && "Expected call site!");
+ assert(CB && "Expected call site!");
// TODO: We need to look at the operand bundles similar to the redirection
// in CallBase.
- if (!ICS.hasOperandBundles()) {
- if (const Function *Callee = ICS.getCalledFunction()) {
+ if (!CB->hasOperandBundles()) {
+ if (const Function *Callee = CB->getCalledFunction()) {
IRPositions.emplace_back(IRPosition::returned(*Callee));
IRPositions.emplace_back(IRPosition::function(*Callee));
for (const Argument &Arg : Callee->args())
if (Arg.hasReturnedAttr()) {
IRPositions.emplace_back(
- IRPosition::callsite_argument(ICS, Arg.getArgNo()));
+ IRPosition::callsite_argument(*CB, Arg.getArgNo()));
IRPositions.emplace_back(
- IRPosition::value(*ICS.getArgOperand(Arg.getArgNo())));
+ IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
IRPositions.emplace_back(IRPosition::argument(Arg));
}
}
}
- IRPositions.emplace_back(
- IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
+ IRPositions.emplace_back(IRPosition::callsite_function(*CB));
return;
case IRPosition::IRP_CALL_SITE_ARGUMENT: {
int ArgNo = IRP.getArgNo();
- assert(ICS && ArgNo >= 0 && "Expected call site!");
+ assert(CB && ArgNo >= 0 && "Expected call site!");
// TODO: We need to look at the operand bundles similar to the redirection
// in CallBase.
- if (!ICS.hasOperandBundles()) {
- const Function *Callee = ICS.getCalledFunction();
+ if (!CB->hasOperandBundles()) {
+ const Function *Callee = CB->getCalledFunction();
if (Callee && Callee->arg_size() > unsigned(ArgNo))
IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
if (Callee)
@@ -369,8 +368,8 @@ bool IRPosition::getAttrsFromIRAttr(Attribute::AttrKind AK,
return false;
AttributeList AttrList;
- if (ImmutableCallSite ICS = ImmutableCallSite(&getAnchorValue()))
- AttrList = ICS.getAttributes();
+ if (const auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
+ AttrList = CB->getAttributes();
else
AttrList = getAssociatedFunction()->getAttributes();
@@ -510,12 +509,12 @@ bool Attributor::isAssumedDead(const Use &U,
return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
CheckBBLivenessOnly, DepClass);
- if (CallSite CS = CallSite(UserI)) {
+ if (auto *CB = dyn_cast<CallBase>(UserI)) {
// For call site argument uses we can check if the argument is
// unused/dead.
- if (CS.isArgOperand(&U)) {
+ if (CB->isArgOperand(&U)) {
const IRPosition &CSArgPos =
- IRPosition::callsite_argument(CS, CS.getArgumentNo(&U));
+ IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
CheckBBLivenessOnly, DepClass);
}
@@ -1617,8 +1616,8 @@ void InformationCache::initializeInformationCache(const Function &CF,
// Note: There are no concrete attributes now so this is initially empty.
switch (I.getOpcode()) {
default:
- assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
- "New call site/base instruction type needs to be known in the "
+ assert(!isa<CallBase>(&I) &&
+ "New call base instruction type needs to be known in the "
"Attributor.");
break;
case Instruction::Call:
@@ -1687,8 +1686,8 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
if (!isModulePass() && !FI.CalledViaMustTail) {
for (const Use &U : F.uses())
- if (ImmutableCallSite ICS = ImmutableCallSite(U.getUser()))
- if (ICS.isCallee(&U) && ICS.isMustTailCall())
+ if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
+ if (CB->isCallee(&U) && CB->isMustTailCall())
FI.CalledViaMustTail = true;
}
@@ -1800,14 +1799,14 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
}
auto CallSitePred = [&](Instruction &I) -> bool {
- CallSite CS(&I);
- IRPosition CSRetPos = IRPosition::callsite_returned(CS);
+ auto *CB = dyn_cast<CallBase>(&I);
+ IRPosition CBRetPos = IRPosition::callsite_returned(*CB);
// Call sites might be dead if they do not have side effects and no live
// users. The return value might be dead if there are no live users.
- getOrCreateAAFor<AAIsDead>(CSRetPos);
+ getOrCreateAAFor<AAIsDead>(CBRetPos);
- Function *Callee = CS.getCalledFunction();
+ Function *Callee = CB->getCalledFunction();
// TODO: Even if the callee is not known now we might be able to simplify
// the call/callee.
if (!Callee)
@@ -1819,46 +1818,46 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
!Callee->hasMetadata(LLVMContext::MD_callback))
return true;
- if (!Callee->getReturnType()->isVoidTy() && !CS->use_empty()) {
+ if (!Callee->getReturnType()->isVoidTy() && !CB->use_empty()) {
- IRPosition CSRetPos = IRPosition::callsite_returned(CS);
+ IRPosition CBRetPos = IRPosition::callsite_returned(*CB);
// Call site return integer values might be limited by a constant range.
if (Callee->getReturnType()->isIntegerTy())
- getOrCreateAAFor<AAValueConstantRange>(CSRetPos);
+ getOrCreateAAFor<AAValueConstantRange>(CBRetPos);
}
- for (int i = 0, e = CS.getNumArgOperands(); i < e; i++) {
+ for (int i = 0, e = CB->getNumArgOperands(); i < e; i++) {
- IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
+ IRPosition CBArgPos = IRPosition::callsite_argument(*CB, i);
// Every call site argument might be dead.
- getOrCreateAAFor<AAIsDead>(CSArgPos);
+ getOrCreateAAFor<AAIsDead>(CBArgPos);
// Call site argument might be simplified.
- getOrCreateAAFor<AAValueSimplify>(CSArgPos);
+ getOrCreateAAFor<AAValueSimplify>(CBArgPos);
- if (!CS.getArgument(i)->getType()->isPointerTy())
+ if (!CB->getArgOperand(i)->getType()->isPointerTy())
continue;
// Call site argument attribute "non-null".
- getOrCreateAAFor<AANonNull>(CSArgPos);
+ getOrCreateAAFor<AANonNull>(CBArgPos);
// Call site argument attribute "no-alias".
- getOrCreateAAFor<AANoAlias>(CSArgPos);
+ getOrCreateAAFor<AANoAlias>(CBArgPos);
// Call site argument attribute "dereferenceable".
- getOrCreateAAFor<AADereferenceable>(CSArgPos);
+ getOrCreateAAFor<AADereferenceable>(CBArgPos);
// Call site argument attribute "align".
- getOrCreateAAFor<AAAlign>(CSArgPos);
+ getOrCreateAAFor<AAAlign>(CBArgPos);
// Call site argument attribute
// "readnone/readonly/writeonly/..."
- getOrCreateAAFor<AAMemoryBehavior>(CSArgPos);
+ getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
// Call site argument attribute "nofree".
- getOrCreateAAFor<AANoFree>(CSArgPos);
+ getOrCreateAAFor<AANoFree>(CBArgPos);
}
return true;
};
@@ -1983,9 +1982,9 @@ static bool runAttributorOnFunctions(InformationCache &InfoCache,
// do it eagerly.
if (F->hasLocalLinkage()) {
if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
- ImmutableCallSite ICS(U.getUser());
- return ICS && ICS.isCallee(&U) &&
- Functions.count(const_cast<Function *>(ICS.getCaller()));
+ const auto *CB = dyn_cast<CallBase>(U.getUser());
+ return CB && CB->isCallee(&U) &&
+ Functions.count(const_cast<Function *>(CB->getCaller()));
}))
continue;
}
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 9ea314f06888..9c58b3f91fc8 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -280,11 +280,11 @@ static bool genericValueTraversal(
if (V->getType()->isPointerTy()) {
NewV = V->stripPointerCasts();
} else {
- CallSite CS(V);
- if (CS && CS.getCalledFunction()) {
- for (Argument &Arg : CS.getCalledFunction()->args())
+ auto *CB = dyn_cast<CallBase>(V);
+ if (CB && CB->getCalledFunction()) {
+ for (Argument &Arg : CB->getCalledFunction()->args())
if (Arg.hasReturnedAttr()) {
- NewV = CS.getArgOperand(Arg.getArgNo());
+ NewV = CB->getArgOperand(Arg.getArgNo());
break;
}
}
@@ -688,9 +688,9 @@ struct AANoUnwindImpl : AANoUnwind {
if (!I.mayThrow())
return true;
- if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
+ if (const auto *CB = dyn_cast<CallBase>(&I)) {
const auto &NoUnwindAA =
- A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
+ A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
return NoUnwindAA.isAssumedNoUnwind();
}
return false;
@@ -1273,8 +1273,7 @@ bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
}
bool AANoSyncImpl::isVolatile(Instruction *I) {
- assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
- "Calls should not be checked here");
+ assert(!isa<CallBase>(I) && "Calls should not be checked here");
switch (I->getOpcode()) {
case Instruction::AtomicRMW:
@@ -1299,12 +1298,12 @@ ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
return true;
- if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
- if (ICS.hasFnAttr(Attribute::NoSync))
+ if (const auto *CB = dyn_cast<CallBase>(&I)) {
+ if (CB->hasFnAttr(Attribute::NoSync))
return true;
const auto &NoSyncAA =
- A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
+ A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
if (NoSyncAA.isAssumedNoSync())
return true;
return false;
@@ -1323,7 +1322,7 @@ ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
return true;
// non-convergent and readnone imply nosync.
- return !ImmutableCallSite(&I).isConvergent();
+ return !cast<CallBase>(I).isConvergent();
};
if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
@@ -1377,12 +1376,12 @@ struct AANoFreeImpl : public AANoFree {
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto CheckForNoFree = [&](Instruction &I) {
- ImmutableCallSite ICS(&I);
- if (ICS.hasFnAttr(Attribute::NoFree))
+ const auto &CB = cast<CallBase>(I);
+ if (CB.hasFnAttr(Attribute::NoFree))
return true;
const auto &NoFreeAA =
- A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
+ A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
return NoFreeAA.isAssumedNoFree();
};
@@ -1559,17 +1558,17 @@ static int64_t getKnownNonNullAndDerefBytesForUse(
bool NullPointerIsDefined =
F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
const DataLayout &DL = A.getInfoCache().getDL();
- if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
- if (ICS.isBundleOperand(U))
+ if (const auto *CB = dyn_cast<CallBase>(I)) {
+ if (CB->isBundleOperand(U))
return 0;
- if (ICS.isCallee(U)) {
+ if (CB->isCallee(U)) {
IsNonNull |= !NullPointerIsDefined;
return 0;
}
- unsigned ArgNo = ICS.getArgumentNo(U);
- IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
+ unsigned ArgNo = CB->getArgOperandNo(U);
+ IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
// As long as we only use known information there is no need to track
// dependences here.
auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
@@ -1803,17 +1802,17 @@ struct AANoRecurseFunction final : AANoRecurseImpl {
// If the above check does not hold anymore we look at the calls.
auto CheckForNoRecurse = [&](Instruction &I) {
- ImmutableCallSite ICS(&I);
- if (ICS.hasFnAttr(Attribute::NoRecurse))
+ const auto &CB = cast<CallBase>(I);
+ if (CB.hasFnAttr(Attribute::NoRecurse))
return true;
const auto &NoRecurseAA =
- A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
+ A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
if (!NoRecurseAA.isAssumedNoRecurse())
return false;
// Recursion to the same function
- if (ICS.getCalledFunction() == getAnchorScope())
+ if (CB.getCalledFunction() == getAnchorScope())
return false;
return true;
@@ -2114,7 +2113,7 @@ struct AAWillReturnImpl : public AAWillReturn {
/// See AbstractAttribute::updateImpl(...).
ChangeStatus updateImpl(Attributor &A) override {
auto CheckForWillReturn = [&](Instruction &I) {
- IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
+ IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
if (WillReturnAA.isKnownWillReturn())
return true;
@@ -2321,8 +2320,8 @@ struct AANoAliasCallSiteArgument final : AANoAliasImpl {
/// See AbstractAttribute::initialize(...).
void initialize(Attributor &A) override {
// See callsite argument attribute and callee argument attribute.
- ImmutableCallSite ICS(&getAnchorValue());
- if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
+ const auto &CB = cast<CallBase>(getAnchorValue());
+ if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
indicateOptimisticFixpoint();
Value &Val = getAssociatedValue();
if (isa<ConstantPointerNull>(Val) &&
@@ -2335,32 +2334,32 @@ struct AANoAliasCallSiteArgument final : AANoAliasImpl {
/// \p OtherArgNo of \p ICS (= the underlying call site).
bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
const AAMemoryBehavior &MemBehaviorAA,
- ImmutableCallSite ICS, unsigned OtherArgNo) {
+ const CallBase &CB, unsigned OtherArgNo) {
// We do not need to worry about aliasing with the underlying IRP.
if (this->getArgNo() == (int)OtherArgNo)
return false;
// If it is not a pointer or pointer vector we do not alias.
- const Value *ArgOp = ICS.getArgOperand(OtherArgNo);
+ const Value *ArgOp = CB.getArgOperand(OtherArgNo);
if (!ArgOp->getType()->isPtrOrPtrVectorTy())
return false;
- auto &ICSArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
- *this, IRPosition::callsite_argument(ICS, OtherArgNo),
+ auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
+ *this, IRPosition::callsite_argument(CB, OtherArgNo),
/* TrackDependence */ false);
// If the argument is readnone, there is no read-write aliasing.
- if (ICSArgMemBehaviorAA.isAssumedReadNone()) {
- A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
+ if (CBArgMemBehaviorAA.isAssumedReadNone()) {
+ A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
return false;
}
// If the argument is readonly and the underlying value is readonly, there
// is no read-write aliasing.
bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
- if (ICSArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
+ if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
- A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
+ A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
return false;
}
@@ -2457,10 +2456,10 @@ struct AANoAliasCallSiteArgument final : AANoAliasImpl {
// Check there is no other pointer argument which could alias with the
// value passed at this call site.
// TODO: AbstractCallSite
- ImmutableCallSite ICS(&getAnchorValue());
- for (unsigned OtherArgNo = 0; OtherArgNo < ICS.getNumArgOperands();
+ const auto &CB = cast<CallBase>(getAnchorValue());
+ for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
OtherArgNo++)
- if (mayAliasWithArgument(A, AAR, MemBehaviorAA, ICS, OtherArgNo))
+ if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
return false;
return true;
@@ -2511,8 +2510,8 @@ struct AANoAliasReturned final : AANoAliasImpl {
/// For now, we can only deduce noalias if we have call sites.
/// FIXME: add more support.
- ImmutableCallSite ICS(&RV);
- if (!ICS)
+ const auto *CB = dyn_cast<CallBase>(&RV);
+ if (!CB)
return false;
const IRPosition &RVPos = IRPosition::value(RV);
@@ -2984,8 +2983,8 @@ struct AAIsDeadFunction : public AAIsDead {
// is a performance optimization for blocks with calls to a lot of internal
// functions. It can however cause dead functions to be treated as live.
for (const Instruction &I : BB)
- if (ImmutableCallSite ICS = ImmutableCallSite(&I))
- if (const Function *F = ICS.getCalledFunction())
+ if (const auto *CB = dyn_cast<CallBase>(&I))
+ if (const Function *F = CB->getCalledFunction())
if (F->hasLocalLinkage())
A.markLiveInternalFunction(*F);
return true;
@@ -3477,12 +3476,12 @@ static unsigned getKnownAlignForUse(Attributor &A,
}
MaybeAlign MA;
- if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
- if (ICS.isBundleOperand(U) || ICS.isCallee(U))
+ if (const auto *CB = dyn_cast<CallBase>(I)) {
+ if (CB->isBundleOperand(U) || CB->isCallee(U))
return 0;
- unsigned ArgNo = ICS.getArgumentNo(U);
- IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
+ unsigned ArgNo = CB->getArgOperandNo(U);
+ IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
// As long as we only use known information there is no need to track
// dependences here.
auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
@@ -3985,13 +3984,13 @@ struct AACaptureUseTracker final : public CaptureTracker {
// For now we only use special logic for call sites. However, the tracker
// itself knows about a lot of other non-capturing cases already.
- CallSite CS(UInst);
- if (!CS || !CS.isArgOperand(U))
+ auto *CB = dyn_cast<CallBase>(UInst);
+ if (!CB || !CB->isArgOperand(U))
return isCapturedIn(/* Memory */ true, /* Integer */ true,
/* Return */ true);
- unsigned ArgNo = CS.getArgumentNo(U);
- const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
+ unsigned ArgNo = CB->getArgOperandNo(U);
+ const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
// If we have a abstract no-capture attribute for the argument we can use
// it to justify a non-capture attribute here. This allows recursion!
auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
@@ -3999,7 +3998,7 @@ struct AACaptureUseTracker final : public CaptureTracker {
return isCapturedIn(/* Memory */ false, /* Integer */ false,
/* Return */ false);
if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
- addPotentialCopy(CS);
+ addPotentialCopy(*CB);
return isCapturedIn(/* Memory */ false, /* Integer */ false,
/* Return */ false);
}
@@ -4010,9 +4009,7 @@ struct AACaptureUseTracker final : public CaptureTracker {
}
/// Register \p CS as potential copy of the value we are checking.
- void addPotentialCopy(CallSite CS) {
- PotentialCopies.push_back(CS.getInstruction());
- }
+ void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
/// See CaptureTracker::shouldExplore(...).
bool shouldExplore(const Use *U) override {
@@ -4992,10 +4989,9 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
// Helper to check if for the given call site the associated argument is
// passed to a callback where the privatization would be different.
- auto IsCompatiblePrivArgOfCallback = [&](CallSite CS) {
+ auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
SmallVector<const Use *, 4> CallbackUses;
- AbstractCallSite::getCallbackUses(cast<CallBase>(*CS.getInstruction()),
- CallbackUses);
+ AbstractCallSite::getCallbackUses(CB, CallbackUses);
for (const Use *U : CallbackUses) {
AbstractCallSite CBACS(U);
assert(CBACS && CBACS.isCallbackCall());
@@ -5012,7 +5008,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
<< CBArgNo << "@" << CBACS.getCalledFunction()->getName()
<< ")\n[AAPrivatizablePtr] " << CBArg << " : "
<< CBACS.getCallArgOperand(CBArg) << " vs "
- << CS.getArgOperand(ArgNo) << "\n"
+ << CB.getArgOperand(ArgNo) << "\n"
<< "[AAPrivatizablePtr] " << CBArg << " : "
<< CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
});
@@ -5094,7 +5090,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
// here.
auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
if (ACS.isDirectCall())
- return IsCompatiblePrivArgOfCallback(CallSite(ACS.getInstruction()));
+ return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
if (ACS.isCallbackCall())
return IsCompatiblePrivArgOfDirectCS(ACS);
return false;
@@ -5727,9 +5723,9 @@ ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
// If the instruction has an own memory behavior state, use it to restrict
// the local state. No further analysis is required as the other memory
// state is as optimistic as it gets.
- if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
+ if (const auto *CB = dyn_cast<CallBase>(&I)) {
const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
- *this, IRPosition::callsite_function(ICS));
+ *this, IRPosition::callsite_function(*CB));
intersectAssumedBits(MemBehaviorAA.getAssumed());
return !isAtFixpoint();
}
@@ -5827,8 +5823,8 @@ bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
// By default we follow all uses assuming UserI might leak information on U,
// we have special handling for call sites operands though.
- ImmutableCallSite ICS(UserI);
- if (!ICS || !ICS.isArgOperand(U))
+ const auto *CB = dyn_cast<CallBase>(UserI);
+ if (!CB || !CB->isArgOperand(U))
return true;
// If the use is a call argument known not to be captured, the users of
@@ -5838,9 +5834,9 @@ bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
// call might the argument "through return", which we allow and for which we
// need to check call users.
if (U->get()->getType()->isPointerTy()) {
- unsigned ArgNo = ICS.getArgumentNo(U);
+ unsigned ArgNo = CB->getArgOperandNo(U);
const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
- *this, IRPosition::callsite_argument(ICS, ArgNo),
+ *this, IRPosition::callsite_argument(*CB, ArgNo),
/* TrackDependence */ true, DepClassTy::OPTIONAL);
return !ArgNoCaptureAA.isAssumedNoCapture();
}
@@ -5874,17 +5870,17 @@ void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
case Instruction::Invoke: {
// For call sites we look at the argument memory behavior attribute (this
// could be recursive!) in order to restrict our own state.
- ImmutableCallSite ICS(UserI);
+ const auto *CB = cast<CallBase>(UserI);
// Give up on operand bundles.
- if (ICS.isBundleOperand(U)) {
+ if (CB->isBundleOperand(U)) {
indicatePessimisticFixpoint();
return;
}
// Calling a function does read the function pointer, maybe write it if the
// function is self-modifying.
- if (ICS.isCallee(U)) {
+ if (CB->isCallee(U)) {
removeAssumedBits(NO_READS);
break;
}
@@ -5893,9 +5889,9 @@ void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
// argument.
IRPosition Pos;
if (U->get()->getType()->isPointerTy())
- Pos = IRPosition::callsite_argument(ICS, ICS.getArgumentNo(U));
+ Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
else
- Pos = IRPosition::callsite_function(ICS);
+ Pos = IRPosition::callsite_function(*CB);
const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
*this, Pos,
/* TrackDependence */ true, DepClassTy::OPTIONAL);
@@ -6184,9 +6180,9 @@ void AAMemoryLocationImpl::categorizePtrValue(
Changed);
return true;
}
- if (ImmutableCallSite ICS = ImmutableCallSite(&V)) {
+ if (const auto *CB = dyn_cast<CallBase>(&V)) {
const auto &NoAliasAA =
- A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(ICS));
+ A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
if (NoAliasAA.isAssumedNoAlias()) {
updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_MALLOCED_MEM, &I,
&V, Changed);
@@ -6226,32 +6222,32 @@ AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
AAMemoryLocation::StateType AccessedLocs;
AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
- if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
+ if (auto *CB = dyn_cast<CallBase>(&I)) {
// First check if we assume any memory is access is visible.
- const auto &ICSMemLocationAA =
- A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(ICS));
+ const auto &CBMemLocationAA =
+ A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
- << " [" << ICSMemLocationAA << "]\n");
+ << " [" << CBMemLocationAA << "]\n");
- if (ICSMemLocationAA.isAssumedReadNone())
+ if (CBMemLocationAA.isAssumedReadNone())
return NO_LOCATIONS;
- if (ICSMemLocationAA.isAssumedInaccessibleMemOnly()) {
+ if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap,
NO_INACCESSIBLE_MEM, &I, nullptr, Changed);
return AccessedLocs.getAssumed();
}
- uint32_t ICSAssumedNotAccessedLocs =
- ICSMemLocationAA.getAssumedNotAccessedLocation();
+ uint32_t CBAssumedNotAccessedLocs =
+ CBMemLocationAA.getAssumedNotAccessedLocation();
// Set the argmemonly and global bit as we handle them separately below.
- uint32_t ICSAssumedNotAccessedLocsNoArgMem =
- ICSAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
+ uint32_t CBAssumedNotAccessedLocsNoArgMem =
+ CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
- if (ICSAssumedNotAccessedLocsNoArgMem & CurMLK)
+ if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
continue;
updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, CurMLK, &I,
nullptr, Changed);
@@ -6259,7 +6255,7 @@ AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
// Now handle global memory if it might be accessed. This is slightly tricky
// as NO_GLOBAL_MEM has multiple bits set.
- bool HasGlobalAccesses = ((~ICSAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
+ bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
if (HasGlobalAccesses) {
auto AccessPred = [&](const Instruction *, const Value *Ptr,
AccessKind Kind, MemoryLocationsKind MLK) {
@@ -6267,7 +6263,7 @@ AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
Ptr, Changed);
return true;
};
- if (!ICSMemLocationAA.checkForAllAccessesToMemoryKind(
+ if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
return AccessedLocs.getWorstState();
}
@@ -6277,18 +6273,18 @@ AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
<< getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
// Now handle argument memory if it might be accessed.
- bool HasArgAccesses = ((~ICSAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
+ bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
if (HasArgAccesses) {
- for (unsigned ArgNo = 0, e = ICS.getNumArgOperands(); ArgNo < e;
+ for (unsigned ArgNo = 0, e = CB->getNumArgOperands(); ArgNo < e;
++ArgNo) {
// Skip non-pointer arguments.
- const Value *ArgOp = ICS.getArgOperand(ArgNo);
+ const Value *ArgOp = CB->getArgOperand(ArgNo);
if (!ArgOp->getType()->isPtrOrPtrVectorTy())
continue;
// Skip readnone arguments.
- const IRPosition &ArgOpIRP = IRPosition::callsite_argument(ICS, ArgNo);
+ const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
*this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 0987cd6597a4..7c1610f8073a 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1242,15 +1242,15 @@ bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
}
/// This is called on every byval argument in call sites.
-bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
- const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
+bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
+ const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
// Find out what feeds this byval argument.
- Value *ByValArg = CS.getArgument(ArgNo);
+ Value *ByValArg = CB.getArgOperand(ArgNo);
Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
MemDepResult DepInfo = MD->getPointerDependencyFrom(
MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
- CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
+ CB.getIterator(), CB.getParent());
if (!DepInfo.isClobber())
return false;
@@ -1269,16 +1269,16 @@ bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
// Get the alignment of the byval. If the call doesn't specify the alignment,
// then it is some target specific value that we can't know.
- unsigned ByValAlign = CS.getParamAlignment(ArgNo);
- if (ByValAlign == 0) return false;
+ MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
+ if (!ByValAlign) return false;
// If it is greater than the memcpy, then we check to see if we can force the
// source of the memcpy to the alignment we need. If we fail, we bail out.
AssumptionCache &AC = LookupAssumptionCache();
DominatorTree &DT = LookupDomTree();
- if (MDep->getSourceAlignment() < ByValAlign &&
- getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
- CS.getInstruction(), &AC, &DT) < ByValAlign)
+ if (MDep->getSourceAlign() < ByValAlign &&
+ getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign->value(), DL,
+ &CB, &AC, &DT) < ByValAlign->value())
return false;
// The address space of the memcpy source must match the byval argument
@@ -1297,14 +1297,14 @@ bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
// not just the defining memcpy.
MemDepResult SourceDep = MD->getPointerDependencyFrom(
MemoryLocation::getForSource(MDep), false,
- CS.getInstruction()->getIterator(), MDep->getParent());
+ CB.getIterator(), MDep->getParent());
if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
return false;
Value *TmpCast = MDep->getSource();
if (MDep->getSource()->getType() != ByValArg->getType()) {
BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
- "tmpcast", CS.getInstruction());
+ "tmpcast", &CB);
// Set the tmpcast's DebugLoc to MDep's
TmpBitCast->setDebugLoc(MDep->getDebugLoc());
TmpCast = TmpBitCast;
@@ -1312,10 +1312,10 @@ bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
<< " " << *MDep << "\n"
- << " " << *CS.getInstruction() << "\n");
+ << " " << CB << "\n");
// Otherwise we're good! Update the byval argument.
- CS.setArgument(ArgNo, TmpCast);
+ CB.setArgOperand(ArgNo, TmpCast);
++NumMemCpyInstr;
return true;
}
@@ -1349,10 +1349,10 @@ bool MemCpyOptPass::iterateOnFunction(Function &F) {
RepeatInstruction = processMemCpy(M);
else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
RepeatInstruction = processMemMove(M);
- else if (auto CS = CallSite(I)) {
- for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
- if (CS.isByValArgument(i))
- MadeChange |= processByValArgument(CS, i);
+ else if (auto *CB = dyn_cast<CallBase>(I)) {
+ for (unsigned I = 0, E = CB->arg_size(); I != E; ++I)
+ if (CB->isByValArgument(I))
+ MadeChange |= processByValArgument(*CB, I);
}
// Reprocess the instruction if desired.
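On the MemCpyOptimizer side, the byval alignment now flows through
MaybeAlign: an unset MaybeAlign means "no explicit alignment was given",
replacing the old convention of 0 meaning unknown. A minimal sketch of
that idiom (illustrative only; the helper is hypothetical):

// Illustrative sketch of the MaybeAlign check used above.
#include "llvm/Support/Alignment.h" // llvm::Align, llvm::MaybeAlign

using namespace llvm;

static bool knownAtLeast(MaybeAlign MA, Align Required) {
  if (!MA)                // no alignment recorded on the parameter
    return false;         // mirrors `if (!ByValAlign) return false;`
  return *MA >= Required; // MaybeAlign dereferences to an Align
}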