[Lldb-commits] [lldb] a58b62b - [IR] Replace all uses of CallBase::getCalledValue() with getCalledOperand().
Craig Topper via lldb-commits
lldb-commits at lists.llvm.org
Mon Apr 27 22:18:29 PDT 2020
Author: Craig Topper
Date: 2020-04-27T22:17:03-07:00
New Revision: a58b62b4a2b96c31b49338b262b609db746449e8
URL: https://github.com/llvm/llvm-project/commit/a58b62b4a2b96c31b49338b262b609db746449e8
DIFF: https://github.com/llvm/llvm-project/commit/a58b62b4a2b96c31b49338b262b609db746449e8.diff
LOG: [IR] Replace all uses of CallBase::getCalledValue() with getCalledOperand().
This method has been commented as deprecated for a while. Remove
it and replace all uses with the equivalent getCalledOperand().
I also made a few cleanups in here. For example, I removed uses
of getElementType on a pointer type where we could just use
getFunctionType from the call.
Differential Revision: https://reviews.llvm.org/D78882
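For reference, a minimal sketch (not taken from the patch) of the before/after
pattern this change applies, assuming `CB` is some llvm::CallBase visible in
the surrounding pass:

  // Before: deprecated accessor, and the function type recovered through the
  // callee pointer's element type.
  //   Value *Callee = CB.getCalledValue();
  //   FunctionType *FTy =
  //       cast<FunctionType>(Callee->getType()->getPointerElementType());

  // After: the equivalent operand accessor, with the function type taken
  // directly from the call itself.
  Value *Callee = CB.getCalledOperand();
  FunctionType *FTy = CB.getFunctionType();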
Added:
Modified:
clang/lib/CodeGen/CGCall.cpp
clang/lib/CodeGen/CGObjC.cpp
clang/lib/CodeGen/CodeGenFunction.cpp
lldb/source/Expression/IRInterpreter.cpp
lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp
lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp
llvm/include/llvm-c/Core.h
llvm/include/llvm/CodeGen/FastISel.h
llvm/include/llvm/IR/AbstractCallSite.h
llvm/include/llvm/IR/InstrTypes.h
llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
llvm/lib/Analysis/InstructionSimplify.cpp
llvm/lib/Analysis/Lint.cpp
llvm/lib/Analysis/MemorySSA.cpp
llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
llvm/lib/Analysis/StackSafetyAnalysis.cpp
llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
llvm/lib/CodeGen/CodeGenPrepare.cpp
llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/lib/CodeGen/WasmEHPrepare.cpp
llvm/lib/CodeGen/WinEHPrepare.cpp
llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
llvm/lib/IR/AsmWriter.cpp
llvm/lib/IR/Core.cpp
llvm/lib/IR/Instructions.cpp
llvm/lib/IR/Verifier.cpp
llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/lib/Target/ARM/ARMFastISel.cpp
llvm/lib/Target/ARM/ARMISelLowering.cpp
llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/lib/Target/X86/X86WinEHState.cpp
llvm/lib/Transforms/Coroutines/CoroSplit.cpp
llvm/lib/Transforms/IPO/CalledValuePropagation.cpp
llvm/lib/Transforms/IPO/GlobalOpt.cpp
llvm/lib/Transforms/IPO/PruneEH.cpp
llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
llvm/lib/Transforms/Utils/Evaluator.cpp
llvm/lib/Transforms/Utils/InlineFunction.cpp
llvm/lib/Transforms/Utils/Local.cpp
llvm/lib/Transforms/Utils/LowerInvoke.cpp
llvm/lib/Transforms/Utils/SimplifyCFG.cpp
llvm/test/LTO/X86/type-mapping-bug3.ll
llvm/tools/llvm-diff/DiffConsumer.cpp
llvm/tools/llvm-diff/DifferenceEngine.cpp
mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
Removed:
################################################################################
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index b3fda6bd4ea3..e0e895f202c2 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -2701,10 +2701,10 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
bool doRetainAutorelease;
- if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
+ if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
doRetainAutorelease = true;
- } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
- .objc_retainAutoreleasedReturnValue) {
+ } else if (call->getCalledOperand() ==
+ CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
doRetainAutorelease = false;
// If we emitted an assembly marker for this call (and the
@@ -2720,8 +2720,8 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
assert(prev);
}
assert(isa<llvm::CallInst>(prev));
- assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
- CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
+ assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
+ CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
InstsToKill.push_back(prev);
}
} else {
@@ -2764,8 +2764,8 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
// Look for a retain call.
llvm::CallInst *retainCall =
dyn_cast<llvm::CallInst>(result->stripPointerCasts());
- if (!retainCall ||
- retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
+ if (!retainCall || retainCall->getCalledOperand() !=
+ CGF.CGM.getObjCEntrypoints().objc_retain)
return nullptr;
// Look for an ordinary load of 'self'.
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index b512592e78ff..e3df22aef2c1 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -2160,7 +2160,8 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
if (!mandatory && isa<llvm::Instruction>(result)) {
llvm::CallInst *call
= cast<llvm::CallInst>(result->stripPointerCasts());
- assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);
+ assert(call->getCalledOperand() ==
+ CGM.getObjCEntrypoints().objc_retainBlock);
call->setMetadata("clang.arc.copy_on_escape",
llvm::MDNode::get(Builder.getContext(), None));
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 9929c154e37b..24e01c46045e 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2464,7 +2464,7 @@ void CodeGenFunction::emitAlignmentAssumptionCheck(
llvm::Value *OffsetValue, llvm::Value *TheCheck,
llvm::Instruction *Assumption) {
assert(Assumption && isa<llvm::CallInst>(Assumption) &&
- cast<llvm::CallInst>(Assumption)->getCalledValue() ==
+ cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
llvm::Intrinsic::getDeclaration(
Builder.GetInsertBlock()->getParent()->getParent(),
llvm::Intrinsic::assume) &&
diff --git a/lldb/source/Expression/IRInterpreter.cpp b/lldb/source/Expression/IRInterpreter.cpp
index ddb2d975d554..6f61a4bd26bf 100644
--- a/lldb/source/Expression/IRInterpreter.cpp
+++ b/lldb/source/Expression/IRInterpreter.cpp
@@ -1371,7 +1371,7 @@ bool IRInterpreter::Interpret(llvm::Module &module, llvm::Function &function,
// Find the address of the callee function
lldb_private::Scalar I;
- const llvm::Value *val = call_inst->getCalledValue();
+ const llvm::Value *val = call_inst->getCalledOperand();
if (!frame.EvaluateValue(I, val, module)) {
error.SetErrorToGenericError();
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp b/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp
index d3ab51118c24..b92f00ec2b63 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/IRDynamicChecks.cpp
@@ -465,7 +465,7 @@ class ObjcObjectChecker : public Instrumenter {
}
static llvm::Function *GetCalledFunction(llvm::CallInst *inst) {
- return GetFunction(inst->getCalledValue());
+ return GetFunction(inst->getCalledOperand());
}
bool InspectInstruction(llvm::Instruction &i) override {
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp b/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp
index 92294ff9c727..8819b8a773f5 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/IRForTarget.cpp
@@ -1395,7 +1395,7 @@ bool IRForTarget::RemoveCXAAtExit(BasicBlock &basic_block) {
if (func && func->getName() == "__cxa_atexit")
remove = true;
- llvm::Value *val = call->getCalledValue();
+ llvm::Value *val = call->getCalledOperand();
if (val && val->getName() == "__cxa_atexit")
remove = true;
diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h
index 2d6490dcbcf1..25802edc9982 100644
--- a/llvm/include/llvm-c/Core.h
+++ b/llvm/include/llvm-c/Core.h
@@ -3252,8 +3252,8 @@ LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef C);
* This expects an LLVMValueRef that corresponds to a llvm::CallInst or
* llvm::InvokeInst.
*
- * @see llvm::CallInst::getCalledValue()
- * @see llvm::InvokeInst::getCalledValue()
+ * @see llvm::CallInst::getCalledOperand()
+ * @see llvm::InvokeInst::getCalledOperand()
*/
LLVMValueRef LLVMGetCalledValue(LLVMValueRef Instr);
diff --git a/llvm/include/llvm/CodeGen/FastISel.h b/llvm/include/llvm/CodeGen/FastISel.h
index 02ec1d38dfb4..7662179db44d 100644
--- a/llvm/include/llvm/CodeGen/FastISel.h
+++ b/llvm/include/llvm/CodeGen/FastISel.h
@@ -127,7 +127,7 @@ class FastISel {
const CallBase &Call,
unsigned FixedArgs = ~0U) {
RetTy = ResultTy;
- Callee = Call.getCalledValue();
+ Callee = Call.getCalledOperand();
Symbol = Target;
IsInReg = Call.hasRetAttr(Attribute::InReg);
diff --git a/llvm/include/llvm/IR/AbstractCallSite.h b/llvm/include/llvm/IR/AbstractCallSite.h
index c13521e157d0..18c0db8fa49d 100644
--- a/llvm/include/llvm/IR/AbstractCallSite.h
+++ b/llvm/include/llvm/IR/AbstractCallSite.h
@@ -201,16 +201,16 @@ class AbstractCallSite {
}
/// Return the pointer to function that is being called.
- Value *getCalledValue() const {
+ Value *getCalledOperand() const {
if (isDirectCall())
- return CB->getCalledValue();
+ return CB->getCalledOperand();
return CB->getArgOperand(getCallArgOperandNoForCallee());
}
/// Return the function being called if this is a direct call, otherwise
/// return null (if it's an indirect call).
Function *getCalledFunction() const {
- Value *V = getCalledValue();
+ Value *V = getCalledOperand();
return V ? dyn_cast<Function>(V->stripPointerCasts()) : nullptr;
}
};
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index d28b3301fa6a..0353afd8e2d0 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -1286,10 +1286,6 @@ class CallBase : public Instruction {
Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
- // DEPRECATED: This routine will be removed in favor of `getCalledOperand` in
- // the near future.
- Value *getCalledValue() const { return getCalledOperand(); }
-
const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
diff --git a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index 2e44bbd3a8ca..b1433c579af8 100644
--- a/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -114,7 +114,7 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
Stores.insert(&*I);
Instruction &Inst = *I;
if (auto *Call = dyn_cast<CallBase>(&Inst)) {
- Value *Callee = Call->getCalledValue();
+ Value *Callee = Call->getCalledOperand();
// Skip actual functions for direct function calls.
if (!isa<Function>(Callee) && isInterestingPointer(Callee))
Pointers.insert(Callee);
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index d1970d45a686..7765cb706273 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -5407,7 +5407,7 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
}
Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) {
- Value *Callee = Call->getCalledValue();
+ Value *Callee = Call->getCalledOperand();
// musttail calls can only be simplified if they are also DCEd.
// As we can't guarantee this here, don't simplify them.
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 338c6a4137e7..30a0846d2af8 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -220,7 +220,7 @@ void Lint::visitFunction(Function &F) {
}
void Lint::visitCallBase(CallBase &I) {
- Value *Callee = I.getCalledValue();
+ Value *Callee = I.getCalledOperand();
visitMemoryReference(I, Callee, MemoryLocation::UnknownSize, 0, nullptr,
MemRef::Callee);
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
index 77f4125b5d4b..aeb3fecf53f2 100644
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -167,7 +167,7 @@ class MemoryLocOrCall {
if (!IsCall)
return Loc == Other.Loc;
- if (Call->getCalledValue() != Other.Call->getCalledValue())
+ if (Call->getCalledOperand() != Other.Call->getCalledOperand())
return false;
return Call->arg_size() == Other.Call->arg_size() &&
@@ -203,7 +203,7 @@ template <> struct DenseMapInfo<MemoryLocOrCall> {
hash_code hash =
hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
- MLOC.getCall()->getCalledValue()));
+ MLOC.getCall()->getCalledOperand()));
for (const Value *Arg : MLOC.getCall()->args())
hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
diff --git a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
index 2f6e2a01c9d2..b900a38fdc0a 100644
--- a/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -316,7 +316,7 @@ static void computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
if (HasLocalsInUsedOrAsm && CI && CI->isInlineAsm())
HasInlineAsmMaybeReferencingInternal = true;
- auto *CalledValue = CB->getCalledValue();
+ auto *CalledValue = CB->getCalledOperand();
auto *CalledFunction = CB->getCalledFunction();
if (CalledValue && !CalledFunction) {
CalledValue = CalledValue->stripPointerCasts();
diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
index 1e06fd8fe3a0..716027a3413c 100644
--- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
@@ -353,7 +353,7 @@ bool StackSafetyLocalAnalysis::analyzeAllUses(const Value *Ptr, UseInfo &US) {
// Do not follow aliases, otherwise we could inadvertently follow
// dso_preemptable aliases or aliases with interposable linkage.
const GlobalValue *Callee =
- dyn_cast<GlobalValue>(CB.getCalledValue()->stripPointerCasts());
+ dyn_cast<GlobalValue>(CB.getCalledOperand()->stripPointerCasts());
if (!Callee) {
US.updateRange(UnknownRange);
return false;
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index be8307b0f7d7..0a6741d97f74 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -2775,7 +2775,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
case Instruction::Invoke: {
const InvokeInst *II = cast<InvokeInst>(&I);
- const Value *Callee = II->getCalledValue();
+ const Value *Callee = II->getCalledOperand();
FunctionType *FTy = II->getFunctionType();
if (II->hasOperandBundles())
@@ -2851,7 +2851,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
}
case Instruction::CallBr: {
const CallBrInst *CBI = cast<CallBrInst>(&I);
- const Value *Callee = CBI->getCalledValue();
+ const Value *Callee = CBI->getCalledOperand();
FunctionType *FTy = CBI->getFunctionType();
if (CBI->hasOperandBundles())
@@ -3029,7 +3029,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(Flags);
Vals.push_back(VE.getTypeID(FTy));
- pushValueAndType(CI.getCalledValue(), InstID, Vals); // Callee
+ pushValueAndType(CI.getCalledOperand(), InstID, Vals); // Callee
// Emit value #'s for the fixed parameters.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 3c95b2ecc07d..d37bac69480d 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1889,7 +1889,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
// Lower inline assembly if we can.
// If we found an inline asm expession, and if the target knows how to
// lower it to normal LLVM code, do so now.
- if (isa<InlineAsm>(CI->getCalledValue())) {
+ if (CI->isInlineAsm()) {
if (TLI->ExpandInlineAsm(CI)) {
// Avoid invalidating the iterator.
CurInstIterator = BB->begin();
@@ -4636,7 +4636,7 @@ static bool FindAllMemoryUses(
continue;
}
- InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
+ InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
if (!IA) return true;
// If this is a memory operand, we're cool, otherwise bail out.
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 70bb27279673..86fbb4b9ae8a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -52,7 +52,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
// Try looking through a bitcast from one function type to another.
// Commonly happens with calls to objc_msgSend().
- const Value *CalleeV = CB.getCalledValue()->stripPointerCasts();
+ const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
if (const Function *F = dyn_cast<Function>(CalleeV))
Info.Callee = MachineOperand::CreateGA(F, 0);
else
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 72d4dbeed1c2..1b1d7c5b0dd9 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1607,7 +1607,7 @@ bool IRTranslator::translateCallBase(const CallBase &CB,
// scan is done to check if any instructions are calls.
bool Success =
CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
- [&]() { return getOrCreateVReg(*CB.getCalledValue()); });
+ [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
// Check if we just inserted a tail call.
if (Success) {
@@ -1712,9 +1712,8 @@ bool IRTranslator::translateInvoke(const User &U,
const BasicBlock *ReturnBB = I.getSuccessor(0);
const BasicBlock *EHPadBB = I.getSuccessor(1);
- const Value *Callee = I.getCalledValue();
- const Function *Fn = dyn_cast<Function>(Callee);
- if (isa<InlineAsm>(Callee))
+ const Function *Fn = I.getCalledFunction();
+ if (I.isInlineAsm())
return false;
// FIXME: support invoking patchpoint and statepoint intrinsics.
diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
index e42591067792..f05394fcf241 100644
--- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -32,7 +32,7 @@ void InlineAsmLowering::anchor() {}
bool InlineAsmLowering::lowerInlineAsm(MachineIRBuilder &MIRBuilder,
const CallBase &Call) const {
- const InlineAsm *IA = cast<InlineAsm>(Call.getCalledValue());
+ const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
StringRef ConstraintStr = IA->getConstraintString();
bool HasOnlyMemoryClobber = false;
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 21076047f77d..1be9544848ec 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -39,7 +39,7 @@ static bool lowerLoadRelative(Function &F) {
for (auto I = F.use_begin(), E = F.use_end(); I != E;) {
auto CI = dyn_cast<CallInst>(I->getUser());
++I;
- if (!CI || CI->getCalledValue() != &F)
+ if (!CI || CI->getCalledOperand() != &F)
continue;
IRBuilder<> B(CI);
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 7ffc84865189..d117c6032d3c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1290,7 +1290,7 @@ bool FastISel::lowerCall(const CallInst *CI) {
IsTailCall = false;
CallLoweringInfo CLI;
- CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), *CI)
+ CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
.setTailCall(IsTailCall);
return lowerCallTo(CLI);
@@ -1300,7 +1300,7 @@ bool FastISel::selectCall(const User *I) {
const CallInst *Call = cast<CallInst>(I);
// Handle simple inline asms.
- if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
+ if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
// If the inline asm has side effects, then make sure that no local value
// lives across by flushing the local value map.
if (IA->hasSideEffects())
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 9b97d5990cc8..3f302d5fa0ca 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -183,7 +183,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// Look for inline asm that clobbers the SP register.
if (auto *Call = dyn_cast<CallBase>(&I)) {
- if (isa<InlineAsm>(Call->getCalledValue())) {
+ if (Call->isInlineAsm()) {
unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
std::vector<TargetLowering::AsmOperandInfo> Ops =
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 70f0a1ab48cd..4ba0cd652017 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -346,7 +346,7 @@ static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
const char *AsmError = ", possible invalid constraint for vector type";
if (const CallInst *CI = dyn_cast<CallInst>(I))
- if (isa<InlineAsm>(CI->getCalledValue()))
+ if (isa<InlineAsm>(CI->getCalledOperand()))
return Ctx.emitError(I, ErrMsg + AsmError);
return Ctx.emitError(I, ErrMsg);
@@ -2776,7 +2776,7 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
LLVMContext::OB_cfguardtarget}) &&
"Cannot lower invokes with arbitrary operand bundles yet!");
- const Value *Callee(I.getCalledValue());
+ const Value *Callee(I.getCalledOperand());
const Function *Fn = dyn_cast<Function>(Callee);
if (isa<InlineAsm>(Callee))
visitInlineAsm(I);
@@ -2856,7 +2856,7 @@ void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
{LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
"Cannot lower callbrs with arbitrary operand bundles yet!");
- assert(isa<InlineAsm>(I.getCalledValue()) &&
+ assert(isa<InlineAsm>(I.getCalledOperand()) &&
"Only know how to handle inlineasm callbr");
visitInlineAsm(I);
CopyToExportRegsIfNeeded(&I);
@@ -7476,7 +7476,7 @@ bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
void SelectionDAGBuilder::visitCall(const CallInst &I) {
// Handle inline assembly differently.
- if (isa<InlineAsm>(I.getCalledValue())) {
+ if (I.isInlineAsm()) {
visitInlineAsm(I);
return;
}
@@ -7648,7 +7648,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
LLVMContext::OB_cfguardtarget}) &&
"Cannot lower calls with arbitrary operand bundles!");
- SDValue Callee = getValue(I.getCalledValue());
+ SDValue Callee = getValue(I.getCalledOperand());
if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
@@ -7949,7 +7949,7 @@ class ExtraFlags {
public:
explicit ExtraFlags(const CallBase &Call) {
- const InlineAsm *IA = cast<InlineAsm>(Call.getCalledValue());
+ const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
if (IA->hasSideEffects())
Flags |= InlineAsm::Extra_HasSideEffects;
if (IA->isAlignStack())
@@ -7982,7 +7982,7 @@ class ExtraFlags {
/// visitInlineAsm - Handle a call to an InlineAsm object.
void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
- const InlineAsm *IA = cast<InlineAsm>(Call.getCalledValue());
+ const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
/// ConstraintOperands - Information about all of the constraints.
SDISelAsmOperandInfoVector ConstraintOperands;
@@ -8656,7 +8656,7 @@ void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
SmallVector<SDValue, 32> Ops;
SDLoc DL = getCurSDLoc();
- Callee = getValue(CI.getCalledValue());
+ Callee = getValue(CI.getCalledOperand());
NullPtr = DAG.getIntPtrConstant(0, DL, true);
// The stackmap intrinsic only records the live variables (the arguments
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 8a46d08852f1..ced8b1bb8304 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -4321,7 +4321,7 @@ TargetLowering::ParseConstraints(const DataLayout &DL,
const CallBase &Call) const {
/// Information about all of the constraints.
AsmOperandInfoVector ConstraintOperands;
- const InlineAsm *IA = cast<InlineAsm>(Call.getCalledValue());
+ const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
unsigned maCount = 0; // Largest number of multiple alternative constraints.
// Do a prepass over the constraints, canonicalizing them, and building up the
diff --git a/llvm/lib/CodeGen/WasmEHPrepare.cpp b/llvm/lib/CodeGen/WasmEHPrepare.cpp
index b7e900573612..44f4fe2ff9b1 100644
--- a/llvm/lib/CodeGen/WasmEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WasmEHPrepare.cpp
@@ -358,9 +358,9 @@ void WasmEHPrepare::prepareEHPad(BasicBlock *BB, bool NeedPersonality,
Instruction *GetExnCI = nullptr, *GetSelectorCI = nullptr;
for (auto &U : FPI->uses()) {
if (auto *CI = dyn_cast<CallInst>(U.getUser())) {
- if (CI->getCalledValue() == GetExnF)
+ if (CI->getCalledOperand() == GetExnF)
GetExnCI = CI;
- if (CI->getCalledValue() == GetSelectorF)
+ if (CI->getCalledOperand() == GetSelectorF)
GetSelectorCI = CI;
}
}
diff --git a/llvm/lib/CodeGen/WinEHPrepare.cpp b/llvm/lib/CodeGen/WinEHPrepare.cpp
index d6c049f3d0da..cb75cd66a847 100644
--- a/llvm/lib/CodeGen/WinEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WinEHPrepare.cpp
@@ -955,7 +955,7 @@ void WinEHPrepare::removeImplausibleInstructions(Function &F) {
// Skip call sites which are nounwind intrinsics or inline asm.
auto *CalledFn =
- dyn_cast<Function>(CB->getCalledValue()->stripPointerCasts());
+ dyn_cast<Function>(CB->getCalledOperand()->stripPointerCasts());
if (CalledFn && ((CalledFn->isIntrinsic() && CB->doesNotThrow()) ||
CB->isInlineAsm()))
continue;
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 46fc1bbe0a6b..62e1ea6e0f0a 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1167,7 +1167,7 @@ void Interpreter::visitCallBase(CallBase &I) {
// To handle indirect calls, we must get the pointer value from the argument
// and treat it as a function pointer.
- GenericValue SRC = getOperandValue(SF.Caller->getCalledValue(), SF);
+ GenericValue SRC = getOperandValue(SF.Caller->getCalledOperand(), SF);
callFunction((Function*)GVTOP(SRC), ArgVals);
}
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 9efcf6fa19e8..8a2777cf496d 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -3905,7 +3905,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
PrintCallingConv(CI->getCallingConv(), Out);
}
- Operand = CI->getCalledValue();
+ Operand = CI->getCalledOperand();
FunctionType *FTy = CI->getFunctionType();
Type *RetTy = FTy->getReturnType();
const AttributeList &PAL = CI->getAttributes();
@@ -3944,7 +3944,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperandBundles(CI);
} else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
- Operand = II->getCalledValue();
+ Operand = II->getCalledOperand();
FunctionType *FTy = II->getFunctionType();
Type *RetTy = FTy->getReturnType();
const AttributeList &PAL = II->getAttributes();
@@ -3987,7 +3987,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
Out << " unwind ";
writeOperand(II->getUnwindDest(), true);
} else if (const CallBrInst *CBI = dyn_cast<CallBrInst>(&I)) {
- Operand = CBI->getCalledValue();
+ Operand = CBI->getCalledOperand();
FunctionType *FTy = CBI->getFunctionType();
Type *RetTy = FTy->getReturnType();
const AttributeList &PAL = CBI->getAttributes();
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index cfa1ebfe63c6..4ebe6a0cee79 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -2840,7 +2840,7 @@ void LLVMRemoveCallSiteStringAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
}
LLVMValueRef LLVMGetCalledValue(LLVMValueRef Instr) {
- return wrap(unwrap<CallBase>(Instr)->getCalledValue());
+ return wrap(unwrap<CallBase>(Instr)->getCalledOperand());
}
LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef Instr) {
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index e04a323e4fe0..cccbd512204e 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -255,7 +255,7 @@ unsigned CallBase::getNumSubclassExtraOperandsDynamic() const {
}
bool CallBase::isIndirectCall() const {
- const Value *V = getCalledValue();
+ const Value *V = getCalledOperand();
if (isa<Function>(V) || isa<Constant>(V))
return false;
return !isInlineAsm();
@@ -491,7 +491,7 @@ CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
Instruction *InsertPt) {
std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
- auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledValue(),
+ auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
Args, OpB, CI->getName(), InsertPt);
NewCI->setTailCallKind(CI->getTailCallKind());
NewCI->setCallingConv(CI->getCallingConv());
@@ -802,9 +802,9 @@ InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
Instruction *InsertPt) {
std::vector<Value *> Args(II->arg_begin(), II->arg_end());
- auto *NewII = InvokeInst::Create(II->getFunctionType(), II->getCalledValue(),
- II->getNormalDest(), II->getUnwindDest(),
- Args, OpB, II->getName(), InsertPt);
+ auto *NewII = InvokeInst::Create(
+ II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
+ II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
NewII->setCallingConv(II->getCallingConv());
NewII->SubclassOptionalData = II->SubclassOptionalData;
NewII->setAttributes(II->getAttributes());
@@ -885,11 +885,9 @@ CallBrInst *CallBrInst::Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> OpB,
Instruction *InsertPt) {
std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
- auto *NewCBI = CallBrInst::Create(CBI->getFunctionType(),
- CBI->getCalledValue(),
- CBI->getDefaultDest(),
- CBI->getIndirectDests(),
- Args, OpB, CBI->getName(), InsertPt);
+ auto *NewCBI = CallBrInst::Create(
+ CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
+ CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
NewCBI->setCallingConv(CBI->getCallingConv());
NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
NewCBI->setAttributes(CBI->getAttributes());
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 6037787ab3ed..636849c3000f 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -2872,9 +2872,9 @@ void Verifier::visitPHINode(PHINode &PN) {
}
void Verifier::visitCallBase(CallBase &Call) {
- Assert(Call.getCalledValue()->getType()->isPointerTy(),
+ Assert(Call.getCalledOperand()->getType()->isPointerTy(),
"Called function must be a pointer!", Call);
- PointerType *FPTy = cast<PointerType>(Call.getCalledValue()->getType());
+ PointerType *FPTy = cast<PointerType>(Call.getCalledOperand()->getType());
Assert(FPTy->getElementType()->isFunctionTy(),
"Called function is not pointer to function type!", Call);
@@ -2907,8 +2907,8 @@ void Verifier::visitCallBase(CallBase &Call) {
bool IsIntrinsic = Call.getCalledFunction() &&
Call.getCalledFunction()->getName().startswith("llvm.");
- Function *Callee
- = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
+ Function *Callee =
+ dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
// Don't allow speculatable on call sites, unless the underlying function
diff --git a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
index 9135f1b40122..9a5615ac1c09 100644
--- a/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -304,7 +304,7 @@ static bool shouldConvertUse(const Constant *Cst, const Instruction *Instr,
// Do not mess with inline asm.
const CallInst *CI = dyn_cast<const CallInst>(Instr);
- return !(CI && isa<const InlineAsm>(CI->getCalledValue()));
+ return !(CI && CI->isInlineAsm());
}
/// Check if the given Cst should be converted into
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
index 7a691c318b0f..d241b4899b43 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
@@ -288,7 +288,7 @@ bool AMDGPUAnnotateKernelFeatures::addFeatureAttributes(Function &F) {
for (Instruction &I : BB) {
if (auto *CB = dyn_cast<CallBase>(&I)) {
const Function *Callee =
- dyn_cast<Function>(CB->getCalledValue()->stripPointerCasts());
+ dyn_cast<Function>(CB->getCalledOperand()->stripPointerCasts());
// TODO: Do something with indirect calls.
if (!Callee) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp b/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp
index 5fbb00908e7e..b74f2a277786 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUFixFunctionBitcasts.cpp
@@ -34,7 +34,8 @@ class AMDGPUFixFunctionBitcasts final
void visitCallBase(CallBase &CB) {
if (CB.getCalledFunction())
return;
- auto *Callee = dyn_cast<Function>(CB.getCalledValue()->stripPointerCasts());
+ auto *Callee =
+ dyn_cast<Function>(CB.getCalledOperand()->stripPointerCasts());
if (Callee && isLegalToPromote(CB, Callee)) {
promoteCall(CB, Callee);
Modified = true;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 402f55722ac1..48cfa064d09d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -782,7 +782,7 @@ bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
// Assume all function calls are a source of divergence.
if (const CallInst *CI = dyn_cast<CallInst>(V)) {
- if (isa<InlineAsm>(CI->getCalledValue()))
+ if (CI->isInlineAsm())
return isInlineAsmSourceOfDivergence(CI);
return true;
}
@@ -810,7 +810,7 @@ bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
}
if (const CallInst *CI = dyn_cast<CallInst>(V)) {
- if (isa<InlineAsm>(CI->getCalledValue()))
+ if (CI->isInlineAsm())
return !isInlineAsmSourceOfDivergence(CI);
return false;
}
@@ -838,7 +838,7 @@ bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
// If we have inline asm returning mixed SGPR and VGPR results, we inferred
// divergent for the overall struct return. We need to override it in the
// case we're extracting an SGPR component here.
- if (isa<InlineAsm>(CI->getCalledValue()))
+ if (CI->isInlineAsm())
return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());
return false;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 69cc96292597..4696b304c219 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -11009,7 +11009,7 @@ static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited,
bool SITargetLowering::requiresUniformRegister(MachineFunction &MF,
const Value *V) const {
if (const CallInst *CI = dyn_cast<CallInst>(V)) {
- if (isa<InlineAsm>(CI->getCalledValue())) {
+ if (CI->isInlineAsm()) {
// FIXME: This cannot give a correct answer. This should only trigger in
// the case where inline asm returns mixed SGPR and VGPR results, used
// outside the defining block. We don't have a specific result to
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 0939d890f36b..ad0125a599d4 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -2288,7 +2288,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
bool ARMFastISel::SelectCall(const Instruction *I,
const char *IntrMemName = nullptr) {
const CallInst *CI = cast<CallInst>(I);
- const Value *Callee = CI->getCalledValue();
+ const Value *Callee = CI->getCalledOperand();
// Can't handle inline asm.
if (isa<InlineAsm>(Callee)) return false;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 3e4d798322ae..ee0050ea9430 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -16518,7 +16518,7 @@ bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
if (!Subtarget->hasV6Ops())
return false;
- InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
+ InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
std::string AsmStr = IA->getAsmString();
SmallVector<StringRef, 4> AsmPieces;
SplitString(AsmStr, AsmPieces, ";\n");
diff --git a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
index 067b5a84856c..0e943aa07a3b 100644
--- a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
+++ b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
@@ -239,7 +239,7 @@ bool BPFAbstractMemberAccess::IsPreserveDIAccessIndexCall(const CallInst *Call,
if (!Call)
return false;
- const auto *GV = dyn_cast<GlobalValue>(Call->getCalledValue());
+ const auto *GV = dyn_cast<GlobalValue>(Call->getCalledOperand());
if (!GV)
return false;
if (GV->getName().startswith("llvm.preserve.array.access.index")) {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 1fbbed770c5c..9669fb5964e3 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1361,19 +1361,19 @@ Align NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
}
unsigned Alignment = 0;
- const Value *DirectCallee = CB->getCalledFunction();
+ const Function *DirectCallee = CB->getCalledFunction();
if (!DirectCallee) {
// We don't have a direct function symbol, but that may be because of
// constant cast instructions in the call.
// With bitcast'd call targets, the instruction will be the call
- if (isa<CallInst>(CB)) {
+ if (const auto *CI = dyn_cast<CallInst>(CB)) {
// Check if we have call alignment metadata
- if (getAlign(*cast<CallInst>(CB), Idx, Alignment))
+ if (getAlign(*CI, Idx, Alignment))
return Align(Alignment);
- const Value *CalleeV = cast<CallInst>(CB)->getCalledValue();
+ const Value *CalleeV = CI->getCalledOperand();
// Ignore any bitcast instructions
while (isa<ConstantExpr>(CalleeV)) {
const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
@@ -1385,15 +1385,15 @@ Align NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
// We have now looked past all of the bitcasts. Do we finally have a
// Function?
- if (isa<Function>(CalleeV))
- DirectCallee = CalleeV;
+ if (const auto *CalleeF = dyn_cast<Function>(CalleeV))
+ DirectCallee = CalleeF;
}
}
// Check for function alignment information if we found that the
// ultimate target is a Function
if (DirectCallee)
- if (getAlign(*cast<Function>(DirectCallee), Idx, Alignment))
+ if (getAlign(*DirectCallee, Idx, Alignment))
return Align(Alignment);
// Call is indirect or alignment information is not available, fall back to
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 28c7319e7d6f..69dca9b78caf 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -5368,7 +5368,7 @@ static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
MachineMemOperand::MOInvariant)
: MachineMemOperand::MONone;
- MachinePointerInfo MPI(CB ? CB->getCalledValue() : nullptr);
+ MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
// Registers used in building the DAG.
const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
index 85555b109a3f..00656f98aeaf 100644
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -271,7 +271,7 @@ bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
J != JE; ++J) {
if (CallInst *CI = dyn_cast<CallInst>(J)) {
// Inline ASM is okay, unless it clobbers the ctr register.
- if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
+ if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
if (asmClobbersCTR(IA))
return true;
continue;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index f7e98804bf6e..063f20b28088 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -761,7 +761,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) {
return false;
bool IsDirect = Func != nullptr;
- if (!IsDirect && isa<ConstantExpr>(Call->getCalledValue()))
+ if (!IsDirect && isa<ConstantExpr>(Call->getCalledOperand()))
return false;
FunctionType *FuncTy = Call->getFunctionType();
@@ -847,7 +847,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) {
unsigned CalleeReg = 0;
if (!IsDirect) {
- CalleeReg = getRegForValue(Call->getCalledValue());
+ CalleeReg = getRegForValue(Call->getCalledOperand());
if (!CalleeReg)
return false;
}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
index 464a54a011fc..7abb6fa8905c 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
@@ -76,7 +76,7 @@ static void findUses(Value *V, Function &F,
if (!CB)
// Skip uses that aren't immediately called
continue;
- Value *Callee = CB->getCalledValue();
+ Value *Callee = CB->getCalledOperand();
if (Callee != V)
// Skip calls where the function isn't the callee
continue;
@@ -307,7 +307,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) {
if (CallMain) {
Main->setName("__original_main");
auto *MainWrapper =
- cast<Function>(CallMain->getCalledValue()->stripPointerCasts());
+ cast<Function>(CallMain->getCalledOperand()->stripPointerCasts());
delete CallMain;
if (Main->isDeclaration()) {
// The wrapper is not needed in this case as we don't need to export
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index be4fe0b31226..93de6e8e4a58 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -258,11 +258,11 @@ class WebAssemblyLowerEmscriptenEHSjLj final : public ModulePass {
bool runSjLjOnFunction(Function &F);
Function *getFindMatchingCatch(Module &M, unsigned NumClauses);
- template <typename CallOrInvoke> Value *wrapInvoke(CallOrInvoke *CI);
+ Value *wrapInvoke(CallBase *CI);
void wrapTestSetjmp(BasicBlock *BB, DebugLoc DL, Value *Threw,
Value *SetjmpTable, Value *SetjmpTableSize, Value *&Label,
Value *&LongjmpResult, BasicBlock *&EndBB);
- template <typename CallOrInvoke> Function *getInvokeWrapper(CallOrInvoke *CI);
+ Function *getInvokeWrapper(CallBase *CI);
bool areAllExceptionsAllowed() const { return EHWhitelistSet.empty(); }
bool canLongjmp(Module &M, const Value *Callee) const;
@@ -388,15 +388,14 @@ WebAssemblyLowerEmscriptenEHSjLj::getFindMatchingCatch(Module &M,
// %__THREW__.val = __THREW__; __THREW__ = 0;
// Returns %__THREW__.val, which indicates whether an exception is thrown (or
// whether longjmp occurred), for future use.
-template <typename CallOrInvoke>
-Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) {
+Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallBase *CI) {
LLVMContext &C = CI->getModule()->getContext();
// If we are calling a function that is noreturn, we must remove that
// attribute. The code we insert here does expect it to return, after we
// catch the exception.
if (CI->doesNotReturn()) {
- if (auto *F = dyn_cast<Function>(CI->getCalledValue()))
+ if (auto *F = CI->getCalledFunction())
F->removeFnAttr(Attribute::NoReturn);
CI->removeAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
}
@@ -412,7 +411,7 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) {
SmallVector<Value *, 16> Args;
// Put the pointer to the callee as first argument, so it can be called
// within the invoke wrapper later
- Args.push_back(CI->getCalledValue());
+ Args.push_back(CI->getCalledOperand());
Args.append(CI->arg_begin(), CI->arg_end());
CallInst *NewCall = IRB.CreateCall(getInvokeWrapper(CI), Args);
NewCall->takeName(CI);
@@ -460,18 +459,10 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) {
}
// Get matching invoke wrapper based on callee signature
-template <typename CallOrInvoke>
-Function *WebAssemblyLowerEmscriptenEHSjLj::getInvokeWrapper(CallOrInvoke *CI) {
+Function *WebAssemblyLowerEmscriptenEHSjLj::getInvokeWrapper(CallBase *CI) {
Module *M = CI->getModule();
SmallVector<Type *, 16> ArgTys;
- Value *Callee = CI->getCalledValue();
- FunctionType *CalleeFTy;
- if (auto *F = dyn_cast<Function>(Callee))
- CalleeFTy = F->getFunctionType();
- else {
- auto *CalleeTy = cast<PointerType>(Callee->getType())->getElementType();
- CalleeFTy = cast<FunctionType>(CalleeTy);
- }
+ FunctionType *CalleeFTy = CI->getFunctionType();
std::string Sig = getSignature(CalleeFTy);
if (InvokeWrappers.find(Sig) != InvokeWrappers.end())
@@ -764,7 +755,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) {
LandingPads.insert(II->getLandingPadInst());
IRB.SetInsertPoint(II);
- bool NeedInvoke = AllowExceptions && canThrow(II->getCalledValue());
+ bool NeedInvoke = AllowExceptions && canThrow(II->getCalledOperand());
if (NeedInvoke) {
// Wrap invoke with invoke wrapper and generate preamble/postamble
Value *Threw = wrapInvoke(II);
@@ -779,7 +770,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) {
// call+branch
SmallVector<Value *, 16> Args(II->arg_begin(), II->arg_end());
CallInst *NewCall =
- IRB.CreateCall(II->getFunctionType(), II->getCalledValue(), Args);
+ IRB.CreateCall(II->getFunctionType(), II->getCalledOperand(), Args);
NewCall->takeName(II);
NewCall->setCallingConv(II->getCallingConv());
NewCall->setDebugLoc(II->getDebugLoc());
@@ -1005,7 +996,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
if (!CI)
continue;
- const Value *Callee = CI->getCalledValue();
+ const Value *Callee = CI->getCalledOperand();
if (!canLongjmp(M, Callee))
continue;
if (isEmAsmCall(M, Callee))
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b46131305bf1..9f0f3cd9bde1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -47867,7 +47867,7 @@ static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
}
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
- InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
+ InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
const std::string &AsmStr = IA->getAsmString();
diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp
index a409907780db..553acdf6d9ca 100644
--- a/llvm/lib/Target/X86/X86WinEHState.cpp
+++ b/llvm/lib/Target/X86/X86WinEHState.cpp
@@ -754,7 +754,7 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
auto *Call = dyn_cast<CallBase>(&I);
if (!Call)
continue;
- if (Call->getCalledValue()->stripPointerCasts() !=
+ if (Call->getCalledOperand()->stripPointerCasts() !=
SetJmp3.getCallee()->stripPointerCasts())
continue;
diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
index 7a1d32896f96..7d719fd084cb 100644
--- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp
@@ -1167,7 +1167,7 @@ static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
if (!CB)
return false;
- auto *Callee = CB->getCalledValue()->stripPointerCasts();
+ auto *Callee = CB->getCalledOperand()->stripPointerCasts();
// See if the callsite is for resumption or destruction of the coroutine.
auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
@@ -1197,7 +1197,7 @@ static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
}
// Grab the CalledValue from CB before erasing the CallInstr.
- auto *CalledValue = CB->getCalledValue();
+ auto *CalledValue = CB->getCalledOperand();
CB->eraseFromParent();
// If no more users remove it. Usually it is a bitcast of SubFn.
diff --git a/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp b/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp
index 98a1df5cd440..74f11fa30959 100644
--- a/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp
+++ b/llvm/lib/Transforms/IPO/CalledValuePropagation.cpp
@@ -385,7 +385,7 @@ static bool runCVP(Module &M) {
bool Changed = false;
MDBuilder MDB(M.getContext());
for (CallBase *C : Lattice.getIndirectCalls()) {
- auto RegI = CVPLatticeKey(C->getCalledValue(), IPOGrouping::Register);
+ auto RegI = CVPLatticeKey(C->getCalledOperand(), IPOGrouping::Register);
CVPLatticeVal LV = Solver.getExistingValueState(RegI);
if (!LV.isFunctionSet() || LV.getFunctions().empty())
continue;
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 2f0783017ce6..e01d813da9a5 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -658,12 +658,12 @@ static bool AllUsesOfValueWillTrapIfNull(const Value *V,
return false; // Storing the value.
}
} else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
- if (CI->getCalledValue() != V) {
+ if (CI->getCalledOperand() != V) {
//cerr << "NONTRAPPING USE: " << *U;
return false; // Not calling the ptr
}
} else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
- if (II->getCalledValue() != V) {
+ if (II->getCalledOperand() != V) {
//cerr << "NONTRAPPING USE: " << *U;
return false; // Not calling the ptr
}
@@ -721,7 +721,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
}
} else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
CallBase *CB = cast<CallBase>(I);
- if (CB->getCalledValue() == V) {
+ if (CB->getCalledOperand() == V) {
// Calling through the pointer! Turn into a direct call, but be careful
// that the pointer is not also being passed as an argument.
CB->setCalledOperand(NewV);
diff --git a/llvm/lib/Transforms/IPO/PruneEH.cpp b/llvm/lib/Transforms/IPO/PruneEH.cpp
index 874791b5fa26..a16dc664db64 100644
--- a/llvm/lib/Transforms/IPO/PruneEH.cpp
+++ b/llvm/lib/Transforms/IPO/PruneEH.cpp
@@ -136,7 +136,7 @@ static bool runImpl(CallGraphSCC &SCC, CallGraph &CG) {
}
if (CheckReturnViaAsm && !SCCMightReturn)
if (const auto *CB = dyn_cast<CallBase>(&I))
- if (const auto *IA = dyn_cast<InlineAsm>(CB->getCalledValue()))
+ if (const auto *IA = dyn_cast<InlineAsm>(CB->getCalledOperand()))
if (IA->hasSideEffects())
SCCMightReturn = true;
}
diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index 9b09c52cdea9..40177d93a2f1 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -1028,7 +1028,7 @@ void DevirtModule::applySingleImplDevirt(VTableSlotInfo &SlotInfo,
VCallSite.emitRemark("single-impl",
TheFn->stripPointerCasts()->getName(), OREGetter);
VCallSite.CB.setCalledOperand(ConstantExpr::getBitCast(
- TheFn, VCallSite.CB.getCalledValue()->getType()));
+ TheFn, VCallSite.CB.getCalledOperand()->getType()));
// This use is no longer unsafe.
if (VCallSite.NumUnsafeUses)
--*VCallSite.NumUnsafeUses;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 765eb2474006..d561b7a06063 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -4163,7 +4163,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Note: New assumption intrinsics created here are registered by
// the InstCombineIRInserter object.
FunctionType *AssumeIntrinsicTy = II->getFunctionType();
- Value *AssumeIntrinsic = II->getCalledValue();
+ Value *AssumeIntrinsic = II->getCalledOperand();
Value *A, *B;
if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());
@@ -4541,7 +4541,7 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
// If the callee is a pointer to a function, attempt to move any casts to the
// arguments of the call/callbr/invoke.
- Value *Callee = Call.getCalledValue();
+ Value *Callee = Call.getCalledOperand();
if (!isa<Function>(Callee) && transformConstExprCastCall(Call))
return nullptr;
@@ -4660,7 +4660,8 @@ Instruction *InstCombiner::visitCallBase(CallBase &Call) {
/// If the callee is a constexpr cast of a function, attempt to move the cast to
/// the arguments of the call/callbr/invoke.
bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
- auto *Callee = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
+ auto *Callee =
+ dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
if (!Callee)
return false;
@@ -4778,7 +4779,7 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
// If the callee is just a declaration, don't change the varargsness of the
// call. We don't want to introduce a varargs call where one doesn't
// already exist.
- PointerType *APTy = cast<PointerType>(Call.getCalledValue()->getType());
+ PointerType *APTy = cast<PointerType>(Call.getCalledOperand()->getType());
if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
return false;
@@ -4946,7 +4947,7 @@ bool InstCombiner::transformConstExprCastCall(CallBase &Call) {
Instruction *
InstCombiner::transformCallThroughTrampoline(CallBase &Call,
IntrinsicInst &Tramp) {
- Value *Callee = Call.getCalledValue();
+ Value *Callee = Call.getCalledOperand();
Type *CalleeTy = Callee->getType();
FunctionType *FTy = Call.getFunctionType();
AttributeList Attrs = Call.getAttributes();
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index d12ceea8b157..4556e3275a10 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1378,7 +1378,7 @@ Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
*Alignment = 0;
PtrOperand = XCHG->getPointerOperand();
} else if (auto CI = dyn_cast<CallInst>(I)) {
- auto *F = dyn_cast<Function>(CI->getCalledValue());
+ auto *F = CI->getCalledFunction();
if (F && (F->getName().startswith("llvm.masked.load.") ||
F->getName().startswith("llvm.masked.store."))) {
unsigned OpOffset = 0;
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 7952ae7ea440..0de956c13ceb 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1553,7 +1553,7 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
auto *MTI = cast<MemTransferInst>(
- IRB.CreateCall(I.getFunctionType(), I.getCalledValue(),
+ IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
{DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
if (ClPreserveAlignment) {
MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
@@ -1593,7 +1593,7 @@ void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
void DFSanVisitor::visitCallBase(CallBase &CB) {
Function *F = CB.getCalledFunction();
- if ((F && F->isIntrinsic()) || isa<InlineAsm>(CB.getCalledValue())) {
+ if ((F && F->isIntrinsic()) || CB.isInlineAsm()) {
visitOperandShadowInst(CB);
return;
}
@@ -1606,7 +1606,7 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
IRBuilder<> IRB(&CB);
DenseMap<Value *, Function *>::iterator i =
- DFSF.DFS.UnwrappedFnMap.find(CB.getCalledValue());
+ DFSF.DFS.UnwrappedFnMap.find(CB.getCalledOperand());
if (i != DFSF.DFS.UnwrappedFnMap.end()) {
Function *F = i->second;
switch (DFSF.DFS.getWrapperKind(F)) {
@@ -1728,8 +1728,7 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
}
}
- FunctionType *FT = cast<FunctionType>(
- CB.getCalledValue()->getType()->getPointerElementType());
+ FunctionType *FT = CB.getFunctionType();
if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
IRB.CreateStore(DFSF.getShadow(CB.getArgOperand(i)),
@@ -1766,7 +1765,7 @@ void DFSanVisitor::visitCallBase(CallBase &CB) {
if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
Value *Func =
- IRB.CreateBitCast(CB.getCalledValue(), PointerType::getUnqual(NewFT));
+ IRB.CreateBitCast(CB.getCalledOperand(), PointerType::getUnqual(NewFT));
std::vector<Value *> Args;
auto i = CB.arg_begin(), E = CB.arg_end();
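
A minimal sketch (not part of this patch) of the two shortcuts the DataFlowSanitizer hunks rely on, shown in isolation. shouldSkip is a hypothetical helper, and its final condition is purely illustrative.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

static bool shouldSkip(const CallBase &CB) {
  // CB.isInlineAsm() is shorthand for isa<InlineAsm>(CB.getCalledOperand()).
  if (CB.isInlineAsm())
    return true;
  // CB.getFunctionType() gives the callee signature directly; no need to go
  // through the called operand's pointer element type.
  FunctionType *FT = CB.getFunctionType();
  return FT->getNumParams() == 0; // illustrative condition only
}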
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 2753d73e0cf3..79e78208997a 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2743,7 +2743,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
: Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
Value *V1 = I.getOperand(0);
Value *V2 = I.getOperand(1);
- Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledValue(),
+ Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledOperand(),
{IRB.CreateBitCast(S1, V1->getType()), V2});
Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
setShadow(&I, IRB.CreateOr(Shift, S2Conv));
@@ -3761,7 +3761,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
const DataLayout &DL = F.getParent()->getDataLayout();
CallBase *CB = cast<CallBase>(&I);
IRBuilder<> IRB(&I);
- InlineAsm *IA = cast<InlineAsm>(CB->getCalledValue());
+ InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
int OutputArgs = getNumOutputArgs(IA, CB);
// The last operand of a CallInst is the function itself.
int NumOperands = CB->getNumOperands() - 1;
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index d3eeb0252352..e3596beda7ef 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -786,7 +786,7 @@ void ModuleSanitizerCoverage::InjectCoverageForIndirectCalls(
for (auto I : IndirCalls) {
IRBuilder<> IRB(I);
CallBase &CB = cast<CallBase>(*I);
- Value *Callee = CB.getCalledValue();
+ Value *Callee = CB.getCalledOperand();
if (isa<InlineAsm>(Callee))
continue;
IRB.CreateCall(SanCovTracePCIndir, IRB.CreatePointerCast(Callee, IntptrTy));
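
A minimal sketch (not part of this patch) mirroring the SanitizerCoverage hunk: feeding an indirect call's target to a tracing callback. TraceIndirFn and IntptrTy are assumed to be set up elsewhere.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

static void traceIndirectCall(CallBase &CB, FunctionCallee TraceIndirFn,
                              Type *IntptrTy) {
  IRBuilder<> IRB(&CB);
  Value *Callee = CB.getCalledOperand();
  // Inline asm also appears as the called operand; it is not a real target.
  if (isa<InlineAsm>(Callee))
    return;
  IRB.CreateCall(TraceIndirFn, IRB.CreatePointerCast(Callee, IntptrTy));
}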
diff --git a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
index 08195ee54ab0..d9542dcf028e 100644
--- a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
+++ b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
@@ -59,7 +59,7 @@ public:
void run(std::vector<CandidateInfo> &Candidates) {
std::vector<Instruction *> Result = findIndirectCalls(F);
for (Instruction *I : Result) {
- Value *Callee = cast<CallBase>(I)->getCalledValue();
+ Value *Callee = cast<CallBase>(I)->getCalledOperand();
Instruction *InsertPt = I;
Instruction *AnnotatedInst = I;
Candidates.emplace_back(CandidateInfo{Callee, InsertPt, AnnotatedInst});
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index e4bc345819b1..652baa5d0587 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -381,7 +381,7 @@ static void analyzeParsePointLiveness(
dbgs() << " " << V->getName() << " " << *V << "\n";
}
if (PrintLiveSetSize) {
- dbgs() << "Safepoint For: " << Call->getCalledValue()->getName() << "\n";
+ dbgs() << "Safepoint For: " << Call->getCalledOperand()->getName() << "\n";
dbgs() << "Number live values: " << LiveSet.size() << "\n";
}
Result.LiveSet = LiveSet;
@@ -1481,7 +1481,7 @@ makeStatepointExplicitImpl(CallBase *Call, /* to replace */
assert(DeoptLowering.equals("live-through") && "Unsupported value!");
}
- Value *CallTarget = Call->getCalledValue();
+ Value *CallTarget = Call->getCalledOperand();
if (Function *F = dyn_cast<Function>(CallTarget)) {
if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize) {
// Calls to llvm.experimental.deoptimize are lowered to calls to the
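
A minimal sketch (not part of this patch) of the test the statepoint hunk performs on the raw call target: recognizing a direct call to llvm.experimental.deoptimize. isDeoptimizeCall is a hypothetical helper name.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"

using namespace llvm;

static bool isDeoptimizeCall(const CallBase &Call) {
  Value *Target = Call.getCalledOperand();
  // The target is only an intrinsic if it is a plain Function.
  if (auto *F = dyn_cast<Function>(Target))
    return F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
  return false;
}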
diff --git a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
index d087717d1ecf..150c355281d3 100644
--- a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
+++ b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
@@ -264,9 +264,9 @@ static CallBase &versionCallSite(CallBase &CB, Value *Callee,
// Create the compare. The called value and callee must have the same type to
// be compared.
- if (CB.getCalledValue()->getType() != Callee->getType())
- Callee = Builder.CreateBitCast(Callee, CB.getCalledValue()->getType());
- auto *Cond = Builder.CreateICmpEQ(CB.getCalledValue(), Callee);
+ if (CB.getCalledOperand()->getType() != Callee->getType())
+ Callee = Builder.CreateBitCast(Callee, CB.getCalledOperand()->getType());
+ auto *Cond = Builder.CreateICmpEQ(CB.getCalledOperand(), Callee);
// Create an if-then-else structure. The original instruction is moved into
// the "else" block, and a clone of the original instruction is placed in the
@@ -462,7 +462,7 @@ bool llvm::tryPromoteCall(CallBase &CB) {
assert(!CB.getCalledFunction());
Module *M = CB.getCaller()->getParent();
const DataLayout &DL = M->getDataLayout();
- Value *Callee = CB.getCalledValue();
+ Value *Callee = CB.getCalledOperand();
LoadInst *VTableEntryLoad = dyn_cast<LoadInst>(Callee);
if (!VTableEntryLoad)
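
A minimal sketch (not part of this patch) of the guard built in versionCallSite above: comparing the indirect called operand against a candidate callee. Builder is assumed to be positioned at the call site; buildPromotionGuard is a hypothetical helper name.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

static Value *buildPromotionGuard(CallBase &CB, Value *Callee,
                                  IRBuilder<> &Builder) {
  // The icmp requires both sides to have the same pointer type.
  if (CB.getCalledOperand()->getType() != Callee->getType())
    Callee = Builder.CreateBitCast(Callee, CB.getCalledOperand()->getType());
  return Builder.CreateICmpEQ(CB.getCalledOperand(), Callee);
}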
diff --git a/llvm/lib/Transforms/Utils/Evaluator.cpp b/llvm/lib/Transforms/Utils/Evaluator.cpp
index 451a68e7e3ce..c5dfbf9d92d1 100644
--- a/llvm/lib/Transforms/Utils/Evaluator.cpp
+++ b/llvm/lib/Transforms/Utils/Evaluator.cpp
@@ -266,7 +266,7 @@ static Function *getFunction(Constant *C) {
Function *
Evaluator::getCalleeWithFormalArgs(CallBase &CB,
SmallVectorImpl<Constant *> &Formals) {
- auto *V = CB.getCalledValue();
+ auto *V = CB.getCalledOperand();
if (auto *Fn = getFunction(getVal(V)))
return getFormalParams(CB, Fn, Formals) ? Fn : nullptr;
@@ -486,7 +486,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
}
// Cannot handle inline asm.
- if (isa<InlineAsm>(CB.getCalledValue())) {
+ if (CB.isInlineAsm()) {
LLVM_DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
return false;
}
@@ -568,7 +568,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
if (Callee->isDeclaration()) {
// If this is a function we can constant fold, do it.
if (Constant *C = ConstantFoldCall(&CB, Callee, Formals, TLI)) {
- InstResult = castCallResultIfNeeded(CB.getCalledValue(), C);
+ InstResult = castCallResultIfNeeded(CB.getCalledOperand(), C);
if (!InstResult)
return false;
LLVM_DEBUG(dbgs() << "Constant folded function call. Result: "
@@ -591,7 +591,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
return false;
}
ValueStack.pop_back();
- InstResult = castCallResultIfNeeded(CB.getCalledValue(), RetVal);
+ InstResult = castCallResultIfNeeded(CB.getCalledOperand(), RetVal);
if (RetVal && !InstResult)
return false;
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 8f8f6995ce9b..fbf8c7b8cb40 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -534,7 +534,7 @@ static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
// instructions require no special handling.
CallInst *CI = dyn_cast<CallInst>(I);
- if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
+ if (!CI || CI->doesNotThrow() || CI->isInlineAsm())
continue;
// We do not need to (and in fact, cannot) convert possibly throwing calls
@@ -2149,7 +2149,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// Skip call sites which are nounwind intrinsics.
auto *CalledFn =
- dyn_cast<Function>(I->getCalledValue()->stripPointerCasts());
+ dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
continue;
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index dd5a57124ab3..265d6d9337d9 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1963,7 +1963,7 @@ CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
SmallVector<OperandBundleDef, 1> OpBundles;
II->getOperandBundlesAsDefs(OpBundles);
CallInst *NewCall = CallInst::Create(II->getFunctionType(),
- II->getCalledValue(), Args, OpBundles);
+ II->getCalledOperand(), Args, OpBundles);
NewCall->setCallingConv(II->getCallingConv());
NewCall->setAttributes(II->getAttributes());
NewCall->setDebugLoc(II->getDebugLoc());
@@ -2014,7 +2014,7 @@ BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
// as of this time.
InvokeInst *II =
- InvokeInst::Create(CI->getFunctionType(), CI->getCalledValue(), Split,
+ InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
II->setDebugLoc(CI->getDebugLoc());
II->setCallingConv(CI->getCallingConv());
@@ -2045,7 +2045,7 @@ static bool markAliveBlocks(Function &F,
// canonicalizes unreachable insts into stores to null or undef.
for (Instruction &I : *BB) {
if (auto *CI = dyn_cast<CallInst>(&I)) {
- Value *Callee = CI->getCalledValue();
+ Value *Callee = CI->getCalledOperand();
// Handle intrinsic calls.
if (Function *F = dyn_cast<Function>(Callee)) {
auto IntrinsicID = F->getIntrinsicID();
@@ -2120,7 +2120,7 @@ static bool markAliveBlocks(Function &F,
Instruction *Terminator = BB->getTerminator();
if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
// Turn invokes that call 'nounwind' functions into ordinary calls.
- Value *Callee = II->getCalledValue();
+ Value *Callee = II->getCalledOperand();
if ((isa<ConstantPointerNull>(Callee) &&
!NullPointerIsDefined(BB->getParent())) ||
isa<UndefValue>(Callee)) {
diff --git a/llvm/lib/Transforms/Utils/LowerInvoke.cpp b/llvm/lib/Transforms/Utils/LowerInvoke.cpp
index 1af0ce3d86cc..0b225e8abc4e 100644
--- a/llvm/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/llvm/lib/Transforms/Utils/LowerInvoke.cpp
@@ -53,7 +53,7 @@ static bool runImpl(Function &F) {
II->getOperandBundlesAsDefs(OpBundles);
// Insert a normal call instruction...
CallInst *NewCall =
- CallInst::Create(II->getFunctionType(), II->getCalledValue(),
+ CallInst::Create(II->getFunctionType(), II->getCalledOperand(),
CallArgs, OpBundles, "", II);
NewCall->takeName(II);
NewCall->setCallingConv(II->getCallingConv());
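
A minimal sketch (not part of this patch) of the invoke-to-call rewrite pattern used by LowerInvoke and createCallMatchingInvoke above, with operand bundles preserved. Branching to the normal destination and erasing the invoke are left out.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static CallInst *invokeToCall(InvokeInst *II) {
  SmallVector<Value *, 8> Args(II->arg_begin(), II->arg_end());
  SmallVector<OperandBundleDef, 1> OpBundles;
  II->getOperandBundlesAsDefs(OpBundles);
  // The called operand (not necessarily a Function) plus the call's own
  // FunctionType are enough to rebuild the call in place.
  CallInst *NewCall = CallInst::Create(II->getFunctionType(),
                                       II->getCalledOperand(), Args,
                                       OpBundles, "", II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  return NewCall;
}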
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index be5a375a2cfa..6b6526bd265b 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -6103,7 +6103,7 @@ static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I) {
// A call to null is undefined.
if (auto *CB = dyn_cast<CallBase>(Use))
return !NullPointerIsDefined(CB->getFunction()) &&
- CB->getCalledValue() == I;
+ CB->getCalledOperand() == I;
}
return false;
}
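
A minimal sketch (not part of this patch) of the "call to null is undefined" test from the SimplifyCFG hunk, phrased as a standalone predicate. isUndefinedCallTarget is a hypothetical helper name.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

static bool isUndefinedCallTarget(Instruction *Use, Instruction *I) {
  // Calling through I is undefined when null pointers are undefined in this
  // function and I is the called operand itself, not merely an argument.
  if (auto *CB = dyn_cast<CallBase>(Use))
    return !NullPointerIsDefined(CB->getFunction()) &&
           CB->getCalledOperand() == I;
  return false;
}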
diff --git a/llvm/test/LTO/X86/type-mapping-bug3.ll b/llvm/test/LTO/X86/type-mapping-bug3.ll
index 2f845a56a929..6d9e81d6cdcd 100644
--- a/llvm/test/LTO/X86/type-mapping-bug3.ll
+++ b/llvm/test/LTO/X86/type-mapping-bug3.ll
@@ -25,7 +25,7 @@ define void @b() {
entry:
%f.addr = alloca %"T3"*
%0 = load %"T3"*, %"T3"** %f.addr

- ; The call with the getCalledValue() vs getCalledFunction() mismatch.
+ ; The call with the getCalledOperand() vs getCalledFunction() mismatch.
call void @d(%"T3"* %0)
unreachable
}
diff --git a/llvm/tools/llvm-diff/DiffConsumer.cpp b/llvm/tools/llvm-diff/DiffConsumer.cpp
index b797143bde1b..6228ff2bae98 100644
--- a/llvm/tools/llvm-diff/DiffConsumer.cpp
+++ b/llvm/tools/llvm-diff/DiffConsumer.cpp
@@ -50,15 +50,15 @@ void DiffConsumer::printValue(Value *V, bool isL) {
return;
}
if (V->getType()->isVoidTy()) {
- if (isa<StoreInst>(V)) {
+ if (auto *SI = dyn_cast<StoreInst>(V)) {
out << "store to ";
- printValue(cast<StoreInst>(V)->getPointerOperand(), isL);
- } else if (isa<CallInst>(V)) {
+ printValue(SI->getPointerOperand(), isL);
+ } else if (auto *CI = dyn_cast<CallInst>(V)) {
out << "call to ";
- printValue(cast<CallInst>(V)->getCalledValue(), isL);
- } else if (isa<InvokeInst>(V)) {
+ printValue(CI->getCalledOperand(), isL);
+ } else if (auto *II = dyn_cast<InvokeInst>(V)) {
out << "invoke to ";
- printValue(cast<InvokeInst>(V)->getCalledValue(), isL);
+ printValue(II->getCalledOperand(), isL);
} else {
out << *V;
}
diff --git a/llvm/tools/llvm-diff/DifferenceEngine.cpp b/llvm/tools/llvm-diff/DifferenceEngine.cpp
index 91befdbe6419..e54217917e5b 100644
--- a/llvm/tools/llvm-diff/DifferenceEngine.cpp
+++ b/llvm/tools/llvm-diff/DifferenceEngine.cpp
@@ -224,7 +224,7 @@ class FunctionDifferenceEngine {
bool diffCallSites(CallBase &L, CallBase &R, bool Complain) {
// FIXME: call attributes
- if (!equivalentAsOperands(L.getCalledValue(), R.getCalledValue())) {
+ if (!equivalentAsOperands(L.getCalledOperand(), R.getCalledOperand())) {
if (Complain) Engine.log("called functions differ");
return true;
}
@@ -638,7 +638,8 @@ void FunctionDifferenceEngine::runBlockDiff(BasicBlock::iterator LStart,
if (!isa<CallInst>(*I)) return;
CallInst *LCall = cast<CallInst>(&*I);
InvokeInst *RInvoke = cast<InvokeInst>(RTerm);
- if (!equivalentAsOperands(LCall->getCalledValue(), RInvoke->getCalledValue()))
+ if (!equivalentAsOperands(LCall->getCalledOperand(),
+ RInvoke->getCalledOperand()))
return;
if (!LCall->use_empty())
Values[LCall] = RInvoke;
@@ -651,7 +652,8 @@ void FunctionDifferenceEngine::runBlockDiff(BasicBlock::iterator LStart,
if (!isa<CallInst>(*I)) return;
CallInst *RCall = cast<CallInst>(I);
InvokeInst *LInvoke = cast<InvokeInst>(LTerm);
- if (!equivalentAsOperands(LInvoke->getCalledValue(), RCall->getCalledValue()))
+ if (!equivalentAsOperands(LInvoke->getCalledOperand(),
+ RCall->getCalledOperand()))
return;
if (!LInvoke->use_empty())
Values[LInvoke] = RCall;
diff --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index ffc47fca24d9..5856c60c64d9 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -720,7 +720,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) {
op = b.create<CallOp>(loc, tys, b.getSymbolRefAttr(callee->getName()),
ops);
} else {
- Value calledValue = processValue(ci->getCalledValue());
+ Value calledValue = processValue(ci->getCalledOperand());
if (!calledValue)
return failure();
ops.insert(ops.begin(), calledValue);
@@ -766,7 +766,7 @@ LogicalResult Importer::processInstruction(llvm::Instruction *inst) {
ops, blocks[ii->getNormalDest()], normalArgs,
blocks[ii->getUnwindDest()], unwindArgs);
} else {
- ops.insert(ops.begin(), processValue(ii->getCalledValue()));
+ ops.insert(ops.begin(), processValue(ii->getCalledOperand()));
op = b.create<InvokeOp>(loc, tys, ops, blocks[ii->getNormalDest()],
normalArgs, blocks[ii->getUnwindDest()],
unwindArgs);
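
A minimal sketch (not part of this patch), for reference only: the CallBase accessors used throughout the hunks above, shown together on a hypothetical CallBase &CB.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

static void demonstrateAccessors(CallBase &CB) {
  // Raw callee operand: a Function, a cast of one, inline asm, or an
  // arbitrary pointer for indirect calls.
  Value *Op = CB.getCalledOperand();
  // Convenience form: null when the call is indirect or goes through a cast.
  Function *F = CB.getCalledFunction();
  // Signature of the call itself; replaces poking at the called operand's
  // pointer element type.
  FunctionType *FTy = CB.getFunctionType();
  (void)Op; (void)F; (void)FTy;
}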