[polly] fa789df - [NFC] Rename `Intrinsic::getDeclaration` to `getOrInsertDeclaration` (#111752)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 11 05:26:09 PDT 2024
Author: Rahul Joshi
Date: 2024-10-11T05:26:03-07:00
New Revision: fa789dffb1e12c2aece0187aeacc48dfb1768340
URL: https://github.com/llvm/llvm-project/commit/fa789dffb1e12c2aece0187aeacc48dfb1768340
DIFF: https://github.com/llvm/llvm-project/commit/fa789dffb1e12c2aece0187aeacc48dfb1768340.diff
LOG: [NFC] Rename `Intrinsic::getDeclaration` to `getOrInsertDeclaration` (#111752)
Rename the function to reflect its actual behavior and to be consistent
with `Module::getOrInsertFunction`. This is also in preparation for
adding a new `Intrinsic::getDeclaration` that will have behavior similar
to `Module::getFunction` (i.e., just lookup, no creation).
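For illustration only (not part of this patch), a minimal sketch of how a caller uses the renamed entry point on an overloaded intrinsic; the helper name `emitCtpop` and its arguments are hypothetical:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    static Value *emitCtpop(Module *M, IRBuilder<> &Builder, Value *V) {
      // Looks up the llvm.ctpop declaration for V's type in M and inserts it
      // if it does not exist yet, mirroring Module::getOrInsertFunction.
      Function *Ctpop =
          Intrinsic::getOrInsertDeclaration(M, Intrinsic::ctpop, {V->getType()});
      return Builder.CreateCall(Ctpop, {V});
    }

The planned lookup-only `Intrinsic::getDeclaration` mentioned above would, by analogy with `Module::getFunction`, return the existing declaration or null without modifying the module.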
Added:
Modified:
clang/lib/CodeGen/CGBuiltin.cpp
clang/lib/CodeGen/CGDecl.cpp
clang/lib/CodeGen/CGException.cpp
clang/lib/CodeGen/CodeGenFunction.cpp
clang/lib/CodeGen/CodeGenModule.cpp
clang/lib/CodeGen/Targets/SystemZ.cpp
llvm/examples/BrainF/BrainF.cpp
llvm/include/llvm-c/Core.h
llvm/include/llvm/IR/IntrinsicInst.h
llvm/include/llvm/IR/Intrinsics.h
llvm/include/llvm/IR/MatrixBuilder.h
llvm/lib/AsmParser/LLParser.cpp
llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
llvm/lib/CodeGen/ExpandMemCmp.cpp
llvm/lib/CodeGen/ExpandVectorPredication.cpp
llvm/lib/CodeGen/HardwareLoops.cpp
llvm/lib/CodeGen/IntrinsicLowering.cpp
llvm/lib/CodeGen/SafeStack.cpp
llvm/lib/CodeGen/SjLjEHPrepare.cpp
llvm/lib/CodeGen/StackProtector.cpp
llvm/lib/CodeGen/WasmEHPrepare.cpp
llvm/lib/IR/AutoUpgrade.cpp
llvm/lib/IR/Core.cpp
llvm/lib/IR/DIBuilder.cpp
llvm/lib/IR/DebugProgramInstruction.cpp
llvm/lib/IR/IRBuilder.cpp
llvm/lib/IR/IntrinsicInst.cpp
llvm/lib/IR/Intrinsics.cpp
llvm/lib/IR/Module.cpp
llvm/lib/IR/VectorBuilder.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64StackTagging.cpp
llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
llvm/lib/Target/AArch64/SMEABIPass.cpp
llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp
llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
llvm/lib/Target/ARM/ARMISelLowering.cpp
llvm/lib/Target/ARM/ARMParallelDSP.cpp
llvm/lib/Target/ARM/MVETailPredication.cpp
llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
llvm/lib/Target/BPF/BPFAdjustOpt.cpp
llvm/lib/Target/BPF/BPFPreserveStaticOffset.cpp
llvm/lib/Target/DirectX/DXILOpLowering.cpp
llvm/lib/Target/Hexagon/HexagonGenExtract.cpp
llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
llvm/lib/Target/SystemZ/SystemZTDC.cpp
llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
llvm/lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
llvm/lib/Target/X86/X86PartialReduction.cpp
llvm/lib/Target/X86/X86WinEHState.cpp
llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
llvm/lib/Transforms/Coroutines/Coroutines.cpp
llvm/lib/Transforms/IPO/CrossDSOCFI.cpp
llvm/lib/Transforms/IPO/SampleProfileProbe.cpp
llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
llvm/lib/Transforms/Instrumentation/KCFI.cpp
llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
llvm/lib/Transforms/Scalar/LoopFlatten.cpp
llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
llvm/lib/Transforms/Scalar/LowerGuardIntrinsic.cpp
llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
llvm/lib/Transforms/Scalar/MakeGuardsExplicit.cpp
llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
llvm/lib/Transforms/Scalar/Scalarizer.cpp
llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
llvm/lib/Transforms/Utils/CloneFunction.cpp
llvm/lib/Transforms/Utils/CodeExtractor.cpp
llvm/lib/Transforms/Utils/EntryExitInstrumenter.cpp
llvm/lib/Transforms/Utils/InlineFunction.cpp
llvm/lib/Transforms/Utils/IntegerDivision.cpp
llvm/lib/Transforms/Utils/Local.cpp
llvm/lib/Transforms/Utils/LowerGlobalDtors.cpp
llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
llvm/lib/Transforms/Utils/PredicateInfo.cpp
llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
llvm/tools/llvm-reduce/deltas/ReduceOpcodes.cpp
llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
llvm/unittests/Analysis/MemorySSATest.cpp
llvm/unittests/Analysis/ValueTrackingTest.cpp
llvm/unittests/IR/BasicBlockTest.cpp
llvm/unittests/IR/DebugInfoTest.cpp
llvm/unittests/IR/IRBuilderTest.cpp
llvm/unittests/IR/IntrinsicsTest.cpp
llvm/unittests/IR/PatternMatch.cpp
llvm/unittests/IR/VPIntrinsicTest.cpp
llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
polly/lib/CodeGen/IslExprBuilder.cpp
polly/lib/CodeGen/PerfMonitor.cpp
Removed:
################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index ff678ee04f9c2a..059c75fae284dd 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -13648,7 +13648,7 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
// Built the IR for the preserve_field_info intrinsic.
- llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
+ llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getOrInsertDeclaration(
&CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
{FieldAddr->getType()});
return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
@@ -13670,10 +13670,10 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
llvm::Function *FnDecl;
if (BuiltinID == BPF::BI__builtin_btf_type_id)
- FnDecl = llvm::Intrinsic::getDeclaration(
+ FnDecl = llvm::Intrinsic::getOrInsertDeclaration(
&CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
else
- FnDecl = llvm::Intrinsic::getDeclaration(
+ FnDecl = llvm::Intrinsic::getOrInsertDeclaration(
&CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
@@ -13708,7 +13708,7 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
- llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
+ llvm::Function *IntrinsicFn = llvm::Intrinsic::getOrInsertDeclaration(
&CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
CallInst *Fn =
Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
@@ -18895,7 +18895,8 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: {
}
case Builtin::BI__builtin_hlsl_wave_is_first_lane: {
Intrinsic::ID ID = CGM.getHLSLRuntime().getWaveIsFirstLaneIntrinsic();
- return EmitRuntimeCall(Intrinsic::getDeclaration(&CGM.getModule(), ID));
+ return EmitRuntimeCall(
+ Intrinsic::getOrInsertDeclaration(&CGM.getModule(), ID));
}
case Builtin::BI__builtin_hlsl_elementwise_sign: {
auto *Arg0 = E->getArg(0);
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 563f728e29d781..30af9268b30e2e 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -2509,8 +2509,8 @@ void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
if (LifetimeStartFn)
return LifetimeStartFn;
- LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
- llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
+ LifetimeStartFn = llvm::Intrinsic::getOrInsertDeclaration(
+ &getModule(), llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
return LifetimeStartFn;
}
@@ -2518,8 +2518,8 @@ llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
if (LifetimeEndFn)
return LifetimeEndFn;
- LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
- llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
+ LifetimeEndFn = llvm::Intrinsic::getOrInsertDeclaration(
+ &getModule(), llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
return LifetimeEndFn;
}
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
index bb2ed237ee9f35..44a45413dbc45a 100644
--- a/clang/lib/CodeGen/CGException.cpp
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -1843,7 +1843,7 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
std::make_pair(ParentAlloca, ParentCGF.EscapedLocals.size()));
int FrameEscapeIdx = InsertPair.first->second;
// call ptr @llvm.localrecover(ptr @parentFn, ptr %fp, i32 N)
- llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration(
+ llvm::Function *FrameRecoverFn = llvm::Intrinsic::getOrInsertDeclaration(
&CGM.getModule(), llvm::Intrinsic::localrecover);
RecoverCall = Builder.CreateCall(
FrameRecoverFn, {ParentCGF.CurFn, ParentFP,
@@ -1942,7 +1942,7 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
// %1 = call ptr @llvm.localrecover(@"?fin$0 at 0@main@@",..)
// %2 = load ptr, ptr %1, align 8
// ==> %2 is the frame-pointer of outermost host function
- llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration(
+ llvm::Function *FrameRecoverFn = llvm::Intrinsic::getOrInsertDeclaration(
&CGM.getModule(), llvm::Intrinsic::localrecover);
ParentFP = Builder.CreateCall(
FrameRecoverFn, {ParentCGF.CurFn, ParentFP,
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index e1fd9b72b8d7b2..f3023c7a20c405 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -463,7 +463,7 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
EscapeArgs.resize(EscapedLocals.size());
for (auto &Pair : EscapedLocals)
EscapeArgs[Pair.second] = Pair.first;
- llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
+ llvm::Function *FrameEscapeFn = llvm::Intrinsic::getOrInsertDeclaration(
&CGM.getModule(), llvm::Intrinsic::localescape);
CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
}
@@ -3130,7 +3130,7 @@ void CodeGenFunction::emitAlignmentAssumptionCheck(
llvm::Instruction *Assumption) {
assert(isa_and_nonnull<llvm::CallInst>(Assumption) &&
cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
- llvm::Intrinsic::getDeclaration(
+ llvm::Intrinsic::getOrInsertDeclaration(
Builder.GetInsertBlock()->getParent()->getParent(),
llvm::Intrinsic::assume) &&
"Assumption should be a call to llvm.assume().");
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 5ba098144a74e7..7a7dea4668ad09 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -6218,8 +6218,8 @@ void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
ArrayRef<llvm::Type*> Tys) {
- return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
- Tys);
+ return llvm::Intrinsic::getOrInsertDeclaration(&getModule(),
+ (llvm::Intrinsic::ID)IID, Tys);
}
static llvm::StringMapEntry<llvm::GlobalVariable *> &
diff --git a/clang/lib/CodeGen/Targets/SystemZ.cpp b/clang/lib/CodeGen/Targets/SystemZ.cpp
index 56129622f48dbd..23c96fa5cf98cb 100644
--- a/clang/lib/CodeGen/Targets/SystemZ.cpp
+++ b/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -110,8 +110,8 @@ class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
if (Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isFP128Ty()) {
llvm::Module &M = CGM.getModule();
auto &Ctx = M.getContext();
- llvm::Function *TDCFunc =
- llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_tdc, Ty);
+ llvm::Function *TDCFunc = llvm::Intrinsic::getOrInsertDeclaration(
+ &M, llvm::Intrinsic::s390_tdc, Ty);
unsigned TDCBits = 0;
switch (BuiltinID) {
case Builtin::BI__builtin_isnan:
diff --git a/llvm/examples/BrainF/BrainF.cpp b/llvm/examples/BrainF/BrainF.cpp
index ac01961735e137..e62cc7bd591a3f 100644
--- a/llvm/examples/BrainF/BrainF.cpp
+++ b/llvm/examples/BrainF/BrainF.cpp
@@ -67,8 +67,8 @@ void BrainF::header(LLVMContext& C) {
//declare void @llvm.memset.p0i8.i32(i8 *, i8, i32, i1)
Type *Tys[] = {PointerType::getUnqual(C), Type::getInt32Ty(C)};
- Function *memset_func = Intrinsic::getDeclaration(module, Intrinsic::memset,
- Tys);
+ Function *memset_func =
+ Intrinsic::getOrInsertDeclaration(module, Intrinsic::memset, Tys);
//declare i32 @getchar()
getchar_func =
diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h
index 28dc270ca368d2..55649d89a6b8f4 100644
--- a/llvm/include/llvm-c/Core.h
+++ b/llvm/include/llvm-c/Core.h
@@ -2807,10 +2807,10 @@ unsigned LLVMLookupIntrinsicID(const char *Name, size_t NameLen);
unsigned LLVMGetIntrinsicID(LLVMValueRef Fn);
/**
- * Create or insert the declaration of an intrinsic. For overloaded intrinsics,
+ * Get or insert the declaration of an intrinsic. For overloaded intrinsics,
* parameter types must be provided to uniquely identify an overload.
*
- * @see llvm::Intrinsic::getDeclaration()
+ * @see llvm::Intrinsic::getOrInsertDeclaration()
*/
LLVMValueRef LLVMGetIntrinsicDeclaration(LLVMModuleRef Mod,
unsigned ID,
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 4458126ffa759d..920eed01374c83 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -568,9 +568,9 @@ class VPIntrinsic : public IntrinsicInst {
/// \brief Declares a llvm.vp.* intrinsic in \p M that matches the parameters
/// \p Params. Additionally, the load and gather intrinsics require
/// \p ReturnType to be specified.
- static Function *getDeclarationForParams(Module *M, Intrinsic::ID,
- Type *ReturnType,
- ArrayRef<Value *> Params);
+ static Function *getOrInsertDeclarationForParams(Module *M, Intrinsic::ID,
+ Type *ReturnType,
+ ArrayRef<Value *> Params);
static std::optional<unsigned> getMaskParamPos(Intrinsic::ID IntrinsicID);
static std::optional<unsigned> getVectorLengthParamPos(
diff --git a/llvm/include/llvm/IR/Intrinsics.h b/llvm/include/llvm/IR/Intrinsics.h
index b251036247c5c0..8c37925732a83a 100644
--- a/llvm/include/llvm/IR/Intrinsics.h
+++ b/llvm/include/llvm/IR/Intrinsics.h
@@ -87,14 +87,15 @@ namespace Intrinsic {
/// Return the attributes for an intrinsic.
AttributeList getAttributes(LLVMContext &C, ID id);
- /// Create or insert an LLVM Function declaration for an intrinsic, and return
- /// it.
+ /// Look up the Function declaration of the intrinsic \p id in the Module
+ /// \p M. If it does not exist, add a declaration and return it. Otherwise,
+ /// return the existing declaration.
///
- /// The Tys parameter is for intrinsics with overloaded types (e.g., those
+ /// The \p Tys parameter is for intrinsics with overloaded types (e.g., those
/// using iAny, fAny, vAny, or iPTRAny). For a declaration of an overloaded
/// intrinsic, Tys must provide exactly one type for each overloaded type in
/// the intrinsic.
- Function *getDeclaration(Module *M, ID id, ArrayRef<Type *> Tys = {});
+ Function *getOrInsertDeclaration(Module *M, ID id, ArrayRef<Type *> Tys = {});
/// Looks up Name in NameTable via binary search. NameTable must be sorted
/// and all entries must start with "llvm.". If NameTable contains an exact
diff --git a/llvm/include/llvm/IR/MatrixBuilder.h b/llvm/include/llvm/IR/MatrixBuilder.h
index dbf2cfb7c5e966..3a04ca87f2b558 100644
--- a/llvm/include/llvm/IR/MatrixBuilder.h
+++ b/llvm/include/llvm/IR/MatrixBuilder.h
@@ -72,7 +72,7 @@ class MatrixBuilder {
B.getInt32(Columns)};
Type *OverloadedTypes[] = {RetType, Stride->getType()};
- Function *TheFn = Intrinsic::getDeclaration(
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
getModule(), Intrinsic::matrix_column_major_load, OverloadedTypes);
CallInst *Call = B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
@@ -95,7 +95,7 @@ class MatrixBuilder {
B.getInt32(Rows), B.getInt32(Columns)};
Type *OverloadedTypes[] = {Matrix->getType(), Stride->getType()};
- Function *TheFn = Intrinsic::getDeclaration(
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
getModule(), Intrinsic::matrix_column_major_store, OverloadedTypes);
CallInst *Call = B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
@@ -115,7 +115,7 @@ class MatrixBuilder {
Type *OverloadedTypes[] = {ReturnType};
Value *Ops[] = {Matrix, B.getInt32(Rows), B.getInt32(Columns)};
- Function *TheFn = Intrinsic::getDeclaration(
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
getModule(), Intrinsic::matrix_transpose, OverloadedTypes);
return B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
@@ -136,7 +136,7 @@ class MatrixBuilder {
B.getInt32(RHSColumns)};
Type *OverloadedTypes[] = {ReturnType, LHSType, RHSType};
- Function *TheFn = Intrinsic::getDeclaration(
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
getModule(), Intrinsic::matrix_multiply, OverloadedTypes);
return B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
}
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index c3b4a8235ce637..5b9bddeb7cfe82 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -360,7 +360,7 @@ bool LLParser::validateEndOfModule(bool UpgradeDebugInfo) {
OverloadTys))
return error(Info.second, "invalid intrinsic signature");
- U.set(Intrinsic::getDeclaration(M, IID, OverloadTys));
+ U.set(Intrinsic::getOrInsertDeclaration(M, IID, OverloadTys));
}
Info.first->eraseFromParent();
diff --git a/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp b/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
index 11f123aa5bed85..0a3d0cf8ec9300 100644
--- a/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
+++ b/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
@@ -356,7 +356,7 @@ static void expandIToFP(Instruction *IToFP) {
Entry->getTerminator()->eraseFromParent();
Function *CTLZ =
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, IntTy);
+ Intrinsic::getOrInsertDeclaration(F->getParent(), Intrinsic::ctlz, IntTy);
ConstantInt *True = Builder.getTrue();
// entry:
diff --git a/llvm/lib/CodeGen/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp
index 04222d5b4afd4c..6d626de0b4e635 100644
--- a/llvm/lib/CodeGen/ExpandMemCmp.cpp
+++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp
@@ -355,7 +355,7 @@ MemCmpExpansion::LoadPair MemCmpExpansion::getLoadPair(Type *LoadSizeType,
// Swap bytes if required.
if (BSwapSizeType) {
- Function *Bswap = Intrinsic::getDeclaration(
+ Function *Bswap = Intrinsic::getOrInsertDeclaration(
CI->getModule(), Intrinsic::bswap, BSwapSizeType);
Lhs = Builder.CreateCall(Bswap, Lhs);
Rhs = Builder.CreateCall(Bswap, Rhs);
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index ffe879ff049648..32ba3e91822ddb 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -237,7 +237,7 @@ Value *CachingVPExpander::convertEVLToMask(IRBuilder<> &Builder,
if (ElemCount.isScalable()) {
auto *M = Builder.GetInsertBlock()->getModule();
Type *BoolVecTy = VectorType::get(Builder.getInt1Ty(), ElemCount);
- Function *ActiveMaskFunc = Intrinsic::getDeclaration(
+ Function *ActiveMaskFunc = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::get_active_lane_mask, {BoolVecTy, EVLParam->getType()});
// `get_active_lane_mask` performs an implicit less-than comparison.
Value *ConstZero = Builder.getInt32(0);
@@ -299,7 +299,7 @@ Value *CachingVPExpander::expandPredicationToIntCall(
case Intrinsic::umin: {
Value *Op0 = VPI.getOperand(0);
Value *Op1 = VPI.getOperand(1);
- Function *Fn = Intrinsic::getDeclaration(
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
replaceOperation(*NewOp, VPI);
@@ -308,7 +308,7 @@ Value *CachingVPExpander::expandPredicationToIntCall(
case Intrinsic::bswap:
case Intrinsic::bitreverse: {
Value *Op = VPI.getOperand(0);
- Function *Fn = Intrinsic::getDeclaration(
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
Value *NewOp = Builder.CreateCall(Fn, {Op}, VPI.getName());
replaceOperation(*NewOp, VPI);
@@ -327,7 +327,7 @@ Value *CachingVPExpander::expandPredicationToFPCall(
case Intrinsic::fabs:
case Intrinsic::sqrt: {
Value *Op0 = VPI.getOperand(0);
- Function *Fn = Intrinsic::getDeclaration(
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
Value *NewOp = Builder.CreateCall(Fn, {Op0}, VPI.getName());
replaceOperation(*NewOp, VPI);
@@ -337,7 +337,7 @@ Value *CachingVPExpander::expandPredicationToFPCall(
case Intrinsic::minnum: {
Value *Op0 = VPI.getOperand(0);
Value *Op1 = VPI.getOperand(1);
- Function *Fn = Intrinsic::getDeclaration(
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
Value *NewOp = Builder.CreateCall(Fn, {Op0, Op1}, VPI.getName());
replaceOperation(*NewOp, VPI);
@@ -350,7 +350,7 @@ Value *CachingVPExpander::expandPredicationToFPCall(
Value *Op0 = VPI.getOperand(0);
Value *Op1 = VPI.getOperand(1);
Value *Op2 = VPI.getOperand(2);
- Function *Fn = Intrinsic::getDeclaration(
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
VPI.getModule(), UnpredicatedIntrinsicID, {VPI.getType()});
Value *NewOp;
if (Intrinsic::isConstrainedFPIntrinsic(UnpredicatedIntrinsicID))
@@ -594,7 +594,7 @@ bool CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
// TODO add caching
auto *M = VPI.getModule();
Function *VScaleFunc =
- Intrinsic::getDeclaration(M, Intrinsic::vscale, Int32Ty);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::vscale, Int32Ty);
IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
Value *VScale = Builder.CreateCall(VScaleFunc, {}, "vscale");
diff --git a/llvm/lib/CodeGen/HardwareLoops.cpp b/llvm/lib/CodeGen/HardwareLoops.cpp
index 9205eabcf5684e..c8a63304a3b63b 100644
--- a/llvm/lib/CodeGen/HardwareLoops.cpp
+++ b/llvm/lib/CodeGen/HardwareLoops.cpp
@@ -512,7 +512,7 @@ Value* HardwareLoop::InsertIterationSetup(Value *LoopCountInit) {
: Intrinsic::test_set_loop_iterations)
: (UsePhi ? Intrinsic::start_loop_iterations
: Intrinsic::set_loop_iterations);
- Function *LoopIter = Intrinsic::getDeclaration(M, ID, Ty);
+ Function *LoopIter = Intrinsic::getOrInsertDeclaration(M, ID, Ty);
Value *LoopSetup = Builder.CreateCall(LoopIter, LoopCountInit);
// Use the return value of the intrinsic to control the entry of the loop.
@@ -541,9 +541,8 @@ void HardwareLoop::InsertLoopDec() {
Attribute::StrictFP))
CondBuilder.setIsFPConstrained(true);
- Function *DecFunc =
- Intrinsic::getDeclaration(M, Intrinsic::loop_decrement,
- LoopDecrement->getType());
+ Function *DecFunc = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::loop_decrement, LoopDecrement->getType());
Value *Ops[] = { LoopDecrement };
Value *NewCond = CondBuilder.CreateCall(DecFunc, Ops);
Value *OldCond = ExitBranch->getCondition();
@@ -566,9 +565,8 @@ Instruction* HardwareLoop::InsertLoopRegDec(Value *EltsRem) {
Attribute::StrictFP))
CondBuilder.setIsFPConstrained(true);
- Function *DecFunc =
- Intrinsic::getDeclaration(M, Intrinsic::loop_decrement_reg,
- { EltsRem->getType() });
+ Function *DecFunc = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::loop_decrement_reg, {EltsRem->getType()});
Value *Ops[] = { EltsRem, LoopDecrement };
Value *Call = CondBuilder.CreateCall(DecFunc, Ops);
diff --git a/llvm/lib/CodeGen/IntrinsicLowering.cpp b/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 256c081b46e262..f799a8cfc1ba7e 100644
--- a/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -474,7 +474,7 @@ bool IntrinsicLowering::LowerToByteSwap(CallInst *CI) {
// Okay, we can do this xform, do so now.
Module *M = CI->getModule();
- Function *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Ty);
+ Function *Int = Intrinsic::getOrInsertDeclaration(M, Intrinsic::bswap, Ty);
Value *Op = CI->getArgOperand(0);
Op = CallInst::Create(Int, Op, CI->getName(), CI->getIterator());
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
index e41d1bfb0e530d..a50909af8bfcfb 100644
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -368,7 +368,8 @@ Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
if (!StackGuardVar) {
TL.insertSSPDeclarations(*M);
- return IRB.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
+ return IRB.CreateCall(
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::stackguard));
}
return IRB.CreateLoad(StackPtrTy, StackGuardVar, "StackGuard");
diff --git a/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 054f7d7215962e..c4ad9f0b2172fc 100644
--- a/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -508,17 +508,19 @@ bool SjLjEHPrepareImpl::runOnFunction(Function &F) {
PointerType *AllocaPtrTy = M.getDataLayout().getAllocaPtrType(M.getContext());
- FrameAddrFn =
- Intrinsic::getDeclaration(&M, Intrinsic::frameaddress, {AllocaPtrTy});
- StackAddrFn =
- Intrinsic::getDeclaration(&M, Intrinsic::stacksave, {AllocaPtrTy});
- StackRestoreFn =
- Intrinsic::getDeclaration(&M, Intrinsic::stackrestore, {AllocaPtrTy});
+ FrameAddrFn = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::frameaddress,
+ {AllocaPtrTy});
+ StackAddrFn = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::stacksave,
+ {AllocaPtrTy});
+ StackRestoreFn = Intrinsic::getOrInsertDeclaration(
+ &M, Intrinsic::stackrestore, {AllocaPtrTy});
BuiltinSetupDispatchFn =
- Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_setup_dispatch);
- LSDAAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_lsda);
- CallSiteFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_callsite);
- FuncCtxFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_functioncontext);
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::eh_sjlj_setup_dispatch);
+ LSDAAddrFn = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::eh_sjlj_lsda);
+ CallSiteFn =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::eh_sjlj_callsite);
+ FuncCtxFn =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::eh_sjlj_functioncontext);
bool Res = setupEntryBlockAndCallSites(F);
return Res;
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index 1f23838b2de0ca..a192161bbd9481 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -519,7 +519,8 @@ static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
if (SupportsSelectionDAGSP)
*SupportsSelectionDAGSP = true;
TLI->insertSSPDeclarations(*M);
- return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
+ return B.CreateCall(
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::stackguard));
}
/// Insert code into the entry block that stores the stack guard
@@ -540,7 +541,7 @@ static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc,
AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");
Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
- B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
+ B.CreateCall(Intrinsic::getOrInsertDeclaration(M, Intrinsic::stackprotector),
{GuardSlot, AI});
return SupportsSelectionDAGSP;
}
diff --git a/llvm/lib/CodeGen/WasmEHPrepare.cpp b/llvm/lib/CodeGen/WasmEHPrepare.cpp
index 7514d49fb18a98..1701b0d04425d2 100644
--- a/llvm/lib/CodeGen/WasmEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WasmEHPrepare.cpp
@@ -196,7 +196,7 @@ bool WasmEHPrepareImpl::prepareThrows(Function &F) {
bool Changed = false;
// wasm.throw() intinsic, which will be lowered to wasm 'throw' instruction.
- ThrowF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_throw);
+ ThrowF = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::wasm_throw);
// Insert an unreachable instruction after a call to @llvm.wasm.throw and
// delete all following instructions within the BB, and delete all the dead
// children of the BB as well.
@@ -260,18 +260,21 @@ bool WasmEHPrepareImpl::prepareEHPads(Function &F) {
0, 2, "selector_gep");
// wasm.landingpad.index() intrinsic, which is to specify landingpad index
- LPadIndexF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_landingpad_index);
+ LPadIndexF =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::wasm_landingpad_index);
// wasm.lsda() intrinsic. Returns the address of LSDA table for the current
// function.
- LSDAF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_lsda);
+ LSDAF = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::wasm_lsda);
// wasm.get.exception() and wasm.get.ehselector() intrinsics. Calls to these
// are generated in clang.
- GetExnF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_get_exception);
- GetSelectorF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_get_ehselector);
+ GetExnF =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::wasm_get_exception);
+ GetSelectorF =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::wasm_get_ehselector);
// wasm.catch() will be lowered down to wasm 'catch' instruction in
// instruction selection.
- CatchF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_catch);
+ CatchF = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::wasm_catch);
// _Unwind_CallPersonality() wrapper function, which calls the personality
CallPersonalityF = M.getOrInsertFunction("_Unwind_CallPersonality",
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 215bfc8c6cfe3e..477b77a6dd5335 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -65,7 +65,7 @@ static bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
// Yes, it's old, replace it with new version.
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
return true;
}
@@ -81,7 +81,7 @@ static bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
// Move this function aside and map down.
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
return true;
}
@@ -94,7 +94,7 @@ static bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
return false;
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
return true;
}
@@ -104,7 +104,7 @@ static bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
return false;
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
return true;
}
@@ -114,7 +114,7 @@ static bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
return false;
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
return true;
}
@@ -502,8 +502,8 @@ static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
return false;
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::x86_rdtscp);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::x86_rdtscp);
return true;
}
@@ -609,14 +609,15 @@ static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
if (ID != Intrinsic::not_intrinsic) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID);
return true;
}
return false; // No other 'x86.xop.*'
}
if (Name == "seh.recoverfp") {
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::eh_recoverfp);
return true;
}
@@ -630,15 +631,15 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
Function *&NewFn) {
if (Name.starts_with("rbit")) {
// '(arm|aarch64).rbit'.
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
- F->arg_begin()->getType());
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::bitreverse, F->arg_begin()->getType());
return true;
}
if (Name == "thread.pointer") {
// '(arm|aarch64).thread.pointer'.
- NewFn =
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::thread_pointer);
return true;
}
@@ -663,7 +664,7 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
std::array<Type *, 2> Tys{
{F->getReturnType(),
FixedVectorType::get(Type::getBFloatTy(Ctx), OperandWidth / 16)}};
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID, Tys);
return true;
}
return false; // No other '(arm|aarch64).neon.bfdot.*'.
@@ -688,7 +689,7 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
: (Intrinsic::ID)Intrinsic::aarch64_neon_bfmlalt)
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic) {
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID);
return true;
}
return false; // No other '(arm|aarch64).neon.bfm*.v16i8'.
@@ -712,8 +713,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
.StartsWith("vqsubu.", Intrinsic::usub_sat)
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic) {
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
- F->arg_begin()->getType());
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID,
+ F->arg_begin()->getType());
return true;
}
@@ -733,10 +734,10 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
auto fArgs = F->getFunctionType()->params();
Type *Tys[] = {fArgs[0], fArgs[1]};
if (Groups[1].size() == 1)
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- StoreInts[fArgs.size() - 3], Tys);
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), StoreInts[fArgs.size() - 3], Tys);
else
- NewFn = Intrinsic::getDeclaration(
+ NewFn = Intrinsic::getOrInsertDeclaration(
F->getParent(), StoreLaneInts[fArgs.size() - 5], Tys);
return true;
}
@@ -810,8 +811,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
.StartsWith("rbit", Intrinsic::bitreverse)
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic) {
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
- F->arg_begin()->getType());
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID,
+ F->arg_begin()->getType());
return true;
}
@@ -821,8 +822,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
return false; // Invalid IR.
VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
if (Ty && Ty->getElementType()->isFloatingPointTy()) {
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::aarch64_neon_faddp, Ty);
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::aarch64_neon_faddp, Ty);
return true;
}
}
@@ -840,7 +841,7 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
.Case("mlalt", Intrinsic::aarch64_sve_bfmlalt_lane_v2)
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic) {
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID);
return true;
}
return false; // No other 'aarch64.sve.bf*.lane'.
@@ -861,8 +862,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
auto Args = F->getFunctionType()->params();
Type *Tys[] = {F->getReturnType(), Args[1]};
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::aarch64_sve_faddqv, Tys);
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::aarch64_sve_faddqv, Tys);
return true;
}
@@ -880,8 +881,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
Intrinsic::aarch64_sve_ld3_sret,
Intrinsic::aarch64_sve_ld4_sret,
};
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- LoadIDs[Name[0] - '2'], Ty);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ LoadIDs[Name[0] - '2'], Ty);
return true;
}
return false; // No other 'aarch64.sve.ld*'.
@@ -892,8 +893,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
if (Name.starts_with("get")) {
// 'aarch64.sve.tuple.get*'.
Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::vector_extract, Tys);
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::vector_extract, Tys);
return true;
}
@@ -901,8 +902,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
// 'aarch64.sve.tuple.set*'.
auto Args = F->getFunctionType()->params();
Type *Tys[] = {Args[0], Args[2], Args[1]};
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::vector_insert, Tys);
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::vector_insert, Tys);
return true;
}
@@ -911,8 +912,8 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
// 'aarch64.sve.tuple.create*'.
auto Args = F->getFunctionType()->params();
Type *Tys[] = {F->getReturnType(), Args[1]};
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::vector_insert, Tys);
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::vector_insert, Tys);
return true;
}
return false; // No other 'aarch64.sve.tuple.*'.
@@ -1026,8 +1027,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
if (Name.consume_front("amdgcn.")) {
if (Name == "alignbit") {
// Target specific intrinsic became redundant
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::fshr,
- {F->getReturnType()});
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::fshr, {F->getReturnType()});
return true;
}
@@ -1056,9 +1057,9 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
if (Name.starts_with("ldexp.")) {
// Target specific intrinsic became redundant
- NewFn = Intrinsic::getDeclaration(
- F->getParent(), Intrinsic::ldexp,
- {F->getReturnType(), F->getArg(1)->getType()});
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::ldexp,
+ {F->getReturnType(), F->getArg(1)->getType()});
return true;
}
break; // No other 'amdgcn.*'
@@ -1074,15 +1075,16 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
- F->arg_begin()->getType());
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID,
+ F->arg_begin()->getType());
return true;
}
}
if (F->arg_size() == 2 && Name == "coro.end") {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::coro_end);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::coro_end);
return true;
}
@@ -1105,7 +1107,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
// converted to DbgVariableRecords later.
if (Name == "addr" || (Name == "value" && F->arg_size() == 4)) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::dbg_value);
return true;
}
break; // No other 'dbg.*'.
@@ -1135,7 +1138,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
// Inserting overloads the inserted type.
Tys.push_back(FT->getParamType(1));
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID, Tys);
return true;
}
@@ -1171,8 +1174,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
if (ID != Intrinsic::not_intrinsic) {
rename(F);
auto Args = F->getFunctionType()->params();
- NewFn =
- Intrinsic::getDeclaration(F->getParent(), ID, {Args[V2 ? 1 : 0]});
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID,
+ {Args[V2 ? 1 : 0]});
return true;
}
break; // No other 'expermental.vector.reduce.*'.
@@ -1182,15 +1185,16 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
if (Name.consume_front("experimental.stepvector.")) {
Intrinsic::ID ID = Intrinsic::stepvector;
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
- F->getFunctionType()->getReturnType());
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), ID, F->getFunctionType()->getReturnType());
return true;
}
break; // No other 'e*'.
case 'f':
if (Name.starts_with("flt.rounds")) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::get_rounding);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::get_rounding);
return true;
}
break;
@@ -1200,8 +1204,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
auto Args = F->getFunctionType()->params();
Type* ObjectPtr[1] = {Args[0]};
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::launder_invariant_group, ObjectPtr);
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::launder_invariant_group, ObjectPtr);
return true;
}
break;
@@ -1218,7 +1222,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
// Get the types of dest, src, and len
ArrayRef<Type *> ParamTypes =
F->getFunctionType()->params().slice(0, 3);
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ParamTypes);
+ NewFn =
+ Intrinsic::getOrInsertDeclaration(F->getParent(), ID, ParamTypes);
return true;
}
}
@@ -1230,8 +1235,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
FT->getParamType(0), // Dest
FT->getParamType(2) // len
};
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
- ParamTypes);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::memset, ParamTypes);
return true;
}
break;
@@ -1247,8 +1252,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
.Case("popc.i", Intrinsic::ctpop)
.Default(Intrinsic::not_intrinsic);
if (IID != Intrinsic::not_intrinsic) {
- NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
- {F->getReturnType()});
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID,
+ {F->getReturnType()});
return true;
}
}
@@ -1316,8 +1321,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
F->getName() !=
Intrinsic::getName(Intrinsic::objectsize, Tys, F->getParent())) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
- Tys);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::objectsize, Tys);
return true;
}
}
@@ -1326,7 +1331,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
case 'p':
if (Name.starts_with("ptr.annotation.") && F->arg_size() == 4) {
rename(F);
- NewFn = Intrinsic::getDeclaration(
+ NewFn = Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::ptr_annotation,
{F->arg_begin()->getType(), F->getArg(1)->getType()});
return true;
@@ -1345,7 +1350,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
if (ID != Intrinsic::not_intrinsic) {
if (!F->getFunctionType()->getParamType(2)->isIntegerTy(32)) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID);
return true;
}
break; // No other applicable upgrades.
@@ -1359,7 +1364,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
if (!F->getFunctionType()->getParamType(2)->isIntegerTy(32) ||
F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID);
return true;
}
break; // No other applicable upgrades.
@@ -1376,7 +1381,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
if (ID != Intrinsic::not_intrinsic) {
if (F->getFunctionType()->getReturnType()->isIntegerTy(64)) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID);
return true;
}
break; // No other applicable upgrades.
@@ -1395,7 +1400,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
case 'v': {
if (Name == "var.annotation" && F->arg_size() == 4) {
rename(F);
- NewFn = Intrinsic::getDeclaration(
+ NewFn = Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::var_annotation,
{{F->arg_begin()->getType(), F->getArg(1)->getType()}});
return true;
@@ -1413,8 +1418,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic) {
rename(F);
- NewFn =
- Intrinsic::getDeclaration(F->getParent(), ID, F->getReturnType());
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID,
+ F->getReturnType());
return true;
}
@@ -1426,7 +1431,7 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
.Default(Intrinsic::not_intrinsic);
if (ID != Intrinsic::not_intrinsic) {
rename(F);
- NewFn = Intrinsic::getDeclaration(F->getParent(), ID);
+ NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID);
return true;
}
break; // No other 'wasm.dot.i8x16.i7x16.*'.
@@ -1740,8 +1745,8 @@ static Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
if (!IndexForm)
std::swap(Args[0], Args[1]);
- Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
- Args);
+ Value *V = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(CI.getModule(), IID), Args);
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
: Builder.CreateBitCast(CI.getArgOperand(1),
Ty);
@@ -1753,7 +1758,7 @@ static Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
Type *Ty = CI.getType();
Value *Op0 = CI.getOperand(0);
Value *Op1 = CI.getOperand(1);
- Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID, Ty);
Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
if (CI.arg_size() == 4) { // For masked intrinsics.
@@ -1780,7 +1785,7 @@ static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
}
Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
- Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID, Ty);
Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
if (CI.arg_size() == 4) { // For masked intrinsics.
@@ -1850,7 +1855,7 @@ static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
}
Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
- Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID, Ty);
Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
unsigned NumArgs = CI.arg_size();
@@ -1911,7 +1916,8 @@ static Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {
Type *Ty = CI.getType();
Value *Op0 = CI.getArgOperand(0);
- Function *F = Intrinsic::getDeclaration(CI.getModule(), Intrinsic::abs, Ty);
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(CI.getModule(), Intrinsic::abs, Ty);
Value *Res = Builder.CreateCall(F, {Op0, Builder.getInt1(false)});
if (CI.arg_size() == 3)
Res = emitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));
@@ -2004,7 +2010,7 @@ static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
Intrinsic::ID IID) {
- Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
+ Function *Intrin = Intrinsic::getOrInsertDeclaration(CI.getModule(), IID);
Value *Rep = Builder.CreateCall(Intrin,
{ CI.getArgOperand(0), CI.getArgOperand(1) });
return emitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
@@ -2263,8 +2269,8 @@ static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
SmallVector<Value *, 4> Args(CI.args());
Args.pop_back();
Args.pop_back();
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
- Args);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(CI.getModule(), IID), Args);
unsigned NumArgs = CI.arg_size();
Rep = emitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
CI.getArgOperand(NumArgs - 2));
@@ -2320,8 +2326,8 @@ static Value *upgradeNVVMIntrinsicCall(StringRef Name, CallBase *CI,
// llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
Value *Arg = CI->getArgOperand(0);
Value *Ctlz = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
- {Arg->getType()}),
+ Intrinsic::getOrInsertDeclaration(F->getParent(), Intrinsic::ctlz,
+ {Arg->getType()}),
{Arg, Builder.getFalse()}, "ctlz");
Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
} else if (Name == "popc.ll") {
@@ -2329,15 +2335,15 @@ static Value *upgradeNVVMIntrinsicCall(StringRef Name, CallBase *CI,
// i64.
Value *Arg = CI->getArgOperand(0);
Value *Popc = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
- {Arg->getType()}),
+ Intrinsic::getOrInsertDeclaration(F->getParent(), Intrinsic::ctpop,
+ {Arg->getType()}),
Arg, "ctpop");
Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
} else if (Name == "h2f") {
- Rep = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::convert_from_fp16,
- {Builder.getFloatTy()}),
- CI->getArgOperand(0), "h2f");
+ Rep = Builder.CreateCall(Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::convert_from_fp16,
+ {Builder.getFloatTy()}),
+ CI->getArgOperand(0), "h2f");
} else if (Name.consume_front("bitcast.") &&
(Name == "f2i" || Name == "i2f" || Name == "ll2d" ||
Name == "d2ll")) {
@@ -2373,7 +2379,7 @@ static Value *upgradeNVVMIntrinsicCall(StringRef Name, CallBase *CI,
if (IID != Intrinsic::not_intrinsic &&
!F->getReturnType()->getScalarType()->isBFloatTy()) {
rename(F);
- Function *NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
+ Function *NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), IID);
SmallVector<Value *, 2> Args;
for (size_t I = 0; I < NewFn->arg_size(); ++I) {
Value *Arg = CI->getArgOperand(I);
@@ -2480,15 +2486,15 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
} else if (Name == "sse.sqrt.ss" || Name == "sse2.sqrt.sd") {
Value *Vec = CI->getArgOperand(0);
Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
- Function *Intr = Intrinsic::getDeclaration(F->getParent(), Intrinsic::sqrt,
- Elt0->getType());
+ Function *Intr = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::sqrt, Elt0->getType());
Elt0 = Builder.CreateCall(Intr, Elt0);
Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
} else if (Name.starts_with("avx.sqrt.p") ||
Name.starts_with("sse2.sqrt.p") ||
Name.starts_with("sse.sqrt.p")) {
Rep =
- Builder.CreateCall(Intrinsic::getDeclaration(
+ Builder.CreateCall(Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::sqrt, CI->getType()),
{CI->getArgOperand(0)});
} else if (Name.starts_with("avx512.mask.sqrt.p")) {
@@ -2499,13 +2505,13 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
: Intrinsic::x86_avx512_sqrt_pd_512;
Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(3)};
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
- Args);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), IID), Args);
} else {
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::sqrt,
- CI->getType()),
- {CI->getArgOperand(0)});
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(F->getParent(), Intrinsic::sqrt,
+ CI->getType()),
+ {CI->getArgOperand(0)});
}
Rep =
emitX86Select(Builder, CI->getArgOperand(2), Rep, CI->getArgOperand(1));
@@ -2629,8 +2635,9 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
break;
}
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
- {CI->getOperand(0), CI->getArgOperand(1)});
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID),
+ {CI->getOperand(0), CI->getArgOperand(1)});
Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
} else if (Name.starts_with("avx512.mask.fpclass.p")) {
Type *OpTy = CI->getArgOperand(0)->getType();
@@ -2652,8 +2659,9 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
else
llvm_unreachable("Unexpected intrinsic");
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
- {CI->getOperand(0), CI->getArgOperand(1)});
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID),
+ {CI->getOperand(0), CI->getArgOperand(1)});
Rep = applyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
} else if (Name.starts_with("avx512.cmp.p")) {
SmallVector<Value *, 4> Args(CI->args());
@@ -2681,8 +2689,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
std::swap(Mask, Args.back());
Args.push_back(Mask);
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
- Args);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID), Args);
} else if (Name.starts_with("avx512.mask.cmp.")) {
// Integer compare intrinsics.
unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
@@ -2776,8 +2784,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round
: Intrinsic::x86_avx512_sitofp_round;
- Function *F =
- Intrinsic::getDeclaration(CI->getModule(), IID, {DstTy, SrcTy});
+ Function *F = Intrinsic::getOrInsertDeclaration(CI->getModule(), IID,
+ {DstTy, SrcTy});
Rep = Builder.CreateCall(F, {Rep, CI->getArgOperand(3)});
} else {
Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt")
@@ -2819,7 +2827,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2),
ResultTy->getNumElements());
- Function *ELd = Intrinsic::getDeclaration(
+ Function *ELd = Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::masked_expandload, ResultTy);
Rep = Builder.CreateCall(ELd, {Ptr, MaskVec, CI->getOperand(1)});
} else if (Name.starts_with("avx512.mask.compress.store.")) {
@@ -2834,7 +2842,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
getX86MaskVec(Builder, CI->getArgOperand(2),
cast<FixedVectorType>(ResultTy)->getNumElements());
- Function *CSt = Intrinsic::getDeclaration(
+ Function *CSt = Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::masked_compressstore, ResultTy);
Rep = Builder.CreateCall(CSt, {CI->getArgOperand(1), Ptr, MaskVec});
} else if (Name.starts_with("avx512.mask.compress.") ||
@@ -2847,7 +2855,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
bool IsCompress = Name[12] == 'c';
Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
: Intrinsic::x86_avx512_mask_expand;
- Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy);
+ Function *Intr =
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID, ResultTy);
Rep = Builder.CreateCall(Intr,
{CI->getOperand(0), CI->getOperand(1), MaskVec});
} else if (Name.starts_with("xop.vpcom")) {
@@ -2910,7 +2919,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
bool ZeroMask = Name[11] == 'z';
Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
} else if (Name == "sse42.crc32.64.8") {
- Function *CRC32 = Intrinsic::getDeclaration(
+ Function *CRC32 = Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::x86_sse42_crc32_32_8);
Value *Trunc0 =
Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
@@ -3405,7 +3414,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
IID = Intrinsic::x86_avx512_add_pd_512;
Rep = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), IID),
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID),
{CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)});
} else {
Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
@@ -3421,7 +3430,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
IID = Intrinsic::x86_avx512_div_pd_512;
Rep = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), IID),
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID),
{CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)});
} else {
Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
@@ -3437,7 +3446,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
IID = Intrinsic::x86_avx512_mul_pd_512;
Rep = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), IID),
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID),
{CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)});
} else {
Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
@@ -3453,7 +3462,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
IID = Intrinsic::x86_avx512_sub_pd_512;
Rep = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), IID),
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID),
{CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)});
} else {
Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
@@ -3471,13 +3480,13 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
Rep = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), IID),
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID),
{CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(4)});
Rep =
emitX86Select(Builder, CI->getArgOperand(3), Rep, CI->getArgOperand(2));
} else if (Name.starts_with("avx512.mask.lzcnt.")) {
Rep =
- Builder.CreateCall(Intrinsic::getDeclaration(
+ Builder.CreateCall(Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::ctlz, CI->getType()),
{CI->getArgOperand(0), Builder.getInt1(false)});
Rep =
@@ -3723,10 +3732,10 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
if (NegAcc)
Ops[2] = Builder.CreateFNeg(Ops[2]);
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
- Intrinsic::fma,
- Ops[0]->getType()),
- Ops);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), Intrinsic::fma,
+ Ops[0]->getType()),
+ Ops);
if (IsScalar)
Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0);
@@ -3738,10 +3747,10 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
- Intrinsic::fma,
- Ops[0]->getType()),
- Ops);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), Intrinsic::fma,
+ Ops[0]->getType()),
+ Ops);
Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
Rep, (uint64_t)0);
@@ -3781,11 +3790,11 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
IID = Intrinsic::x86_avx512_vfmadd_f64;
else
IID = Intrinsic::x86_avx512_vfmadd_f32;
- Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
+ Function *FMA = Intrinsic::getOrInsertDeclaration(CI->getModule(), IID);
Rep = Builder.CreateCall(FMA, Ops);
} else {
- Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
- A->getType());
+ Function *FMA = Intrinsic::getOrInsertDeclaration(
+ CI->getModule(), Intrinsic::fma, A->getType());
Rep = Builder.CreateCall(FMA, {A, B, C});
}
@@ -3837,11 +3846,12 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
else
IID = Intrinsic::x86_avx512_vfmadd_pd_512;
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
- {A, B, C, CI->getArgOperand(4)});
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID),
+ {A, B, C, CI->getArgOperand(4)});
} else {
- Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
- A->getType());
+ Function *FMA = Intrinsic::getOrInsertDeclaration(
+ CI->getModule(), Intrinsic::fma, A->getType());
Rep = Builder.CreateCall(FMA, {A, B, C});
}
@@ -3868,8 +3878,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Value *Ops[] = {CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2)};
Ops[2] = Builder.CreateFNeg(Ops[2]);
- Rep =
- Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), Ops);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID), Ops);
} else if (Name.starts_with("avx512.mask.vfmaddsub.p") ||
Name.starts_with("avx512.mask3.vfmaddsub.p") ||
Name.starts_with("avx512.maskz.vfmaddsub.p") ||
@@ -3892,16 +3902,16 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
if (IsSubAdd)
Ops[2] = Builder.CreateFNeg(Ops[2]);
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
- Ops);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(F->getParent(), IID), Ops);
} else {
int NumElts = cast<FixedVectorType>(CI->getType())->getNumElements();
Value *Ops[] = {CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2)};
- Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
- Ops[0]->getType());
+ Function *FMA = Intrinsic::getOrInsertDeclaration(
+ CI->getModule(), Intrinsic::fma, Ops[0]->getType());
Value *Odd = Builder.CreateCall(FMA, Ops);
Ops[2] = Builder.CreateFNeg(Ops[2]);
Value *Even = Builder.CreateCall(FMA, Ops);
@@ -3944,8 +3954,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2), CI->getArgOperand(3)};
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
- Args);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), IID), Args);
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
: CI->getArgOperand(0);
Rep = emitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
@@ -3972,8 +3982,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2)};
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
- Args);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), IID), Args);
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
: CI->getArgOperand(0);
Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
@@ -4008,8 +4018,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2)};
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
- Args);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), IID), Args);
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
: CI->getArgOperand(0);
Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
@@ -4038,8 +4048,8 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2)};
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
- Args);
+ Rep = Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), IID), Args);
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
: CI->getArgOperand(0);
Rep = emitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
@@ -4062,7 +4072,7 @@ static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Value *Args[] = {CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2)};
Value *NewCall = Builder.CreateCall(
- Intrinsic::getDeclaration(CI->getModule(), IID), Args);
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), IID), Args);
// Extract the second result and store it.
Value *Data = Builder.CreateExtractValue(NewCall, 1);
@@ -4108,7 +4118,7 @@ static Value *upgradeAArch64IntrinsicCall(StringRef Name, CallBase *CI,
Args[1] = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
GoodPredTy, Args[1]);
- Function *NewF = Intrinsic::getDeclaration(CI->getModule(), NewID);
+ Function *NewF = Intrinsic::getOrInsertDeclaration(CI->getModule(), NewID);
return Builder.CreateCall(NewF, Args, CI->getName());
}
@@ -4117,16 +4127,17 @@ static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
if (Name == "mve.vctp64.old") {
// Replace the old v4i1 vctp64 with a v2i1 vctp and predicate-casts to the
// correct type.
- Value *VCTP = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::arm_mve_vctp64),
- CI->getArgOperand(0), CI->getName());
+ Value *VCTP =
+ Builder.CreateCall(Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::arm_mve_vctp64),
+ CI->getArgOperand(0), CI->getName());
Value *C1 = Builder.CreateCall(
- Intrinsic::getDeclaration(
+ Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::arm_mve_pred_v2i,
{VectorType::get(Builder.getInt1Ty(), 2, false)}),
VCTP);
return Builder.CreateCall(
- Intrinsic::getDeclaration(
+ Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::arm_mve_pred_i2v,
{VectorType::get(Builder.getInt1Ty(), 4, false)}),
C1);
@@ -4188,19 +4199,19 @@ static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
Type *Ty = Op->getType();
if (Ty->getScalarSizeInBits() == 1) {
Value *C1 = Builder.CreateCall(
- Intrinsic::getDeclaration(
+ Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::arm_mve_pred_v2i,
{VectorType::get(Builder.getInt1Ty(), 4, false)}),
Op);
Op = Builder.CreateCall(
- Intrinsic::getDeclaration(F->getParent(),
- Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
+ Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::arm_mve_pred_i2v, {V2I1Ty}),
C1);
}
Ops.push_back(Op);
}
- Function *Fn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
+ Function *Fn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID, Tys);
return Builder.CreateCall(Fn, Ops, CI->getName());
}
llvm_unreachable("Unknown function for ARM CallBase upgrade.");
@@ -5088,7 +5099,8 @@ void llvm::UpgradeARCRuntime(Module &M) {
if (!Fn)
return;
- Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
+ Function *NewFn =
+ llvm::Intrinsic::getOrInsertDeclaration(&M, IntrinsicFunc);
for (User *U : make_early_inc_range(Fn->users())) {
CallInst *CI = dyn_cast<CallInst>(U);
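
A minimal sketch of the updated call-site pattern, wrapped in a hypothetical helper; only the Intrinsic::getOrInsertDeclaration and IRBuilder calls mirror the hunks above, everything else (the helper name, its surroundings) is assumed for illustration and is not part of this patch:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Hypothetical helper: declare llvm.sqrt for V's type and call it.
    static Value *emitSqrt(IRBuilderBase &Builder, Value *V) {
      Module *M = Builder.GetInsertBlock()->getModule();
      // Previously spelled Intrinsic::getDeclaration(M, Intrinsic::sqrt, ...).
      Function *Sqrt =
          Intrinsic::getOrInsertDeclaration(M, Intrinsic::sqrt, V->getType());
      return Builder.CreateCall(Sqrt, {V});
    }
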
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index ee084e870263d0..1cf998c6850068 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -2468,7 +2468,7 @@ LLVMValueRef LLVMGetIntrinsicDeclaration(LLVMModuleRef Mod,
size_t ParamCount) {
ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount);
auto IID = llvm_map_to_intrinsic_id(ID);
- return wrap(llvm::Intrinsic::getDeclaration(unwrap(Mod), IID, Tys));
+ return wrap(llvm::Intrinsic::getOrInsertDeclaration(unwrap(Mod), IID, Tys));
}
const char *LLVMIntrinsicGetName(unsigned ID, size_t *NameLength) {
diff --git a/llvm/lib/IR/DIBuilder.cpp b/llvm/lib/IR/DIBuilder.cpp
index 0db82cdd6373c8..447a9d65174636 100644
--- a/llvm/lib/IR/DIBuilder.cpp
+++ b/llvm/lib/IR/DIBuilder.cpp
@@ -991,7 +991,7 @@ DbgInstPtr DIBuilder::insertDbgAssign(Instruction *LinkedInstr, Value *Val,
LLVMContext &Ctx = LinkedInstr->getContext();
Module *M = LinkedInstr->getModule();
if (!AssignFn)
- AssignFn = Intrinsic::getDeclaration(M, Intrinsic::dbg_assign);
+ AssignFn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::dbg_assign);
std::array<Value *, 6> Args = {
MetadataAsValue::get(Ctx, ValueAsMetadata::get(Val)),
@@ -1060,7 +1060,7 @@ static Value *getDbgIntrinsicValueImpl(LLVMContext &VMContext, Value *V) {
}
static Function *getDeclareIntrin(Module &M) {
- return Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
+ return Intrinsic::getOrInsertDeclaration(&M, Intrinsic::dbg_declare);
}
DbgInstPtr DIBuilder::insertDbgValueIntrinsic(
@@ -1074,7 +1074,7 @@ DbgInstPtr DIBuilder::insertDbgValueIntrinsic(
}
if (!ValueFn)
- ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
+ ValueFn = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::dbg_value);
return insertDbgIntrinsic(ValueFn, Val, VarInfo, Expr, DL, InsertBB,
InsertBefore);
}
@@ -1175,7 +1175,7 @@ DbgInstPtr DIBuilder::insertLabel(DILabel *LabelInfo, const DILocation *DL,
}
if (!LabelFn)
- LabelFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_label);
+ LabelFn = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::dbg_label);
Value *Args[] = {MetadataAsValue::get(VMContext, LabelInfo)};
diff --git a/llvm/lib/IR/DebugProgramInstruction.cpp b/llvm/lib/IR/DebugProgramInstruction.cpp
index 0db908211b553c..b37dbd534092c3 100644
--- a/llvm/lib/IR/DebugProgramInstruction.cpp
+++ b/llvm/lib/IR/DebugProgramInstruction.cpp
@@ -413,13 +413,13 @@ DbgVariableRecord::createDebugIntrinsic(Module *M,
// Work out what sort of intrinsic we're going to produce.
switch (getType()) {
case DbgVariableRecord::LocationType::Declare:
- IntrinsicFn = Intrinsic::getDeclaration(M, Intrinsic::dbg_declare);
+ IntrinsicFn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::dbg_declare);
break;
case DbgVariableRecord::LocationType::Value:
- IntrinsicFn = Intrinsic::getDeclaration(M, Intrinsic::dbg_value);
+ IntrinsicFn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::dbg_value);
break;
case DbgVariableRecord::LocationType::Assign:
- IntrinsicFn = Intrinsic::getDeclaration(M, Intrinsic::dbg_assign);
+ IntrinsicFn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::dbg_assign);
break;
case DbgVariableRecord::LocationType::End:
case DbgVariableRecord::LocationType::Any:
@@ -459,7 +459,7 @@ DbgVariableRecord::createDebugIntrinsic(Module *M,
DbgLabelInst *
DbgLabelRecord::createDebugIntrinsic(Module *M,
Instruction *InsertBefore) const {
- auto *LabelFn = Intrinsic::getDeclaration(M, Intrinsic::dbg_label);
+ auto *LabelFn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::dbg_label);
Value *Args[] = {
MetadataAsValue::get(getDebugLoc()->getContext(), getLabel())};
DbgLabelInst *DbgLabel = cast<DbgLabelInst>(
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 8bf695e835c368..3654bf9a9e70b5 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -91,8 +91,8 @@ Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
if (cast<ConstantInt>(Scaling)->isZero())
return Scaling;
Module *M = GetInsertBlock()->getParent()->getParent();
- Function *TheFn =
- Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::vscale,
+ {Scaling->getType()});
CallInst *CI = CreateCall(TheFn, {}, {}, Name);
return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
}
@@ -142,7 +142,8 @@ CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
Type *Tys[] = { Ptr->getType(), Size->getType() };
Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
+ Function *TheFn =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::memset, Tys);
CallInst *CI = CreateCall(TheFn, Ops);
@@ -170,7 +171,8 @@ CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
Type *Tys[] = {Dst->getType(), Size->getType()};
Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset_inline, Tys);
+ Function *TheFn =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::memset_inline, Tys);
CallInst *CI = CreateCall(TheFn, Ops);
@@ -197,7 +199,7 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
Type *Tys[] = {Ptr->getType(), Size->getType()};
Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getDeclaration(
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::memset_element_unordered_atomic, Tys);
CallInst *CI = CreateCall(TheFn, Ops);
@@ -227,7 +229,7 @@ CallInst *IRBuilderBase::CreateMemTransferInst(
Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(M, IntrID, Tys);
CallInst *CI = CreateCall(TheFn, Ops);
@@ -265,7 +267,7 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getDeclaration(
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::memcpy_element_unordered_atomic, Tys);
CallInst *CI = CreateCall(TheFn, Ops);
@@ -381,7 +383,7 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getDeclaration(
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::memmove_element_unordered_atomic, Tys);
CallInst *CI = CreateCall(TheFn, Ops);
@@ -411,23 +413,23 @@ CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
Module *M = GetInsertBlock()->getParent()->getParent();
Value *Ops[] = {Src};
Type *Tys[] = { Src->getType() };
- auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
+ auto Decl = Intrinsic::getOrInsertDeclaration(M, ID, Tys);
return CreateCall(Decl, Ops);
}
CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
Module *M = GetInsertBlock()->getParent()->getParent();
Value *Ops[] = {Acc, Src};
- auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
- {Src->getType()});
+ auto Decl = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::vector_reduce_fadd, {Src->getType()});
return CreateCall(Decl, Ops);
}
CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
Module *M = GetInsertBlock()->getParent()->getParent();
Value *Ops[] = {Acc, Src};
- auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
- {Src->getType()});
+ auto Decl = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::vector_reduce_fmul, {Src->getType()});
return CreateCall(Decl, Ops);
}
@@ -489,8 +491,8 @@ CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
"lifetime.start requires the size to be an i64");
Value *Ops[] = { Size, Ptr };
Module *M = BB->getParent()->getParent();
- Function *TheFn =
- Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::lifetime_start, {Ptr->getType()});
return CreateCall(TheFn, Ops);
}
@@ -504,8 +506,8 @@ CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
"lifetime.end requires the size to be an i64");
Value *Ops[] = { Size, Ptr };
Module *M = BB->getParent()->getParent();
- Function *TheFn =
- Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::lifetime_end, {Ptr->getType()});
return CreateCall(TheFn, Ops);
}
@@ -523,8 +525,8 @@ CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
// Fill in the single overloaded type: memory object type.
Type *ObjectPtr[1] = {Ptr->getType()};
Module *M = BB->getParent()->getParent();
- Function *TheFn =
- Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::invariant_start, ObjectPtr);
return CreateCall(TheFn, Ops);
}
@@ -556,13 +558,13 @@ IRBuilderBase::CreateAssumption(Value *Cond,
Value *Ops[] = { Cond };
Module *M = BB->getParent()->getParent();
- Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
+ Function *FnAssume = Intrinsic::getOrInsertDeclaration(M, Intrinsic::assume);
return CreateCall(FnAssume, Ops, OpBundles);
}
Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
Module *M = BB->getModule();
- auto *FnIntrinsic = Intrinsic::getDeclaration(
+ auto *FnIntrinsic = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::experimental_noalias_scope_decl, {});
return CreateCall(FnIntrinsic, {Scope});
}
@@ -615,7 +617,7 @@ CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
ArrayRef<Type *> OverloadedTypes,
const Twine &Name) {
Module *M = BB->getParent()->getParent();
- Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(M, Id, OverloadedTypes);
return CreateCall(TheFn, Ops, {}, Name);
}
@@ -765,9 +767,9 @@ static CallInst *CreateGCStatepointCallCommon(
const Twine &Name) {
Module *M = Builder->GetInsertBlock()->getParent()->getParent();
// Fill in the one generic type'd argument (the function is also vararg)
- Function *FnStatepoint =
- Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
- {ActualCallee.getCallee()->getType()});
+ Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::experimental_gc_statepoint,
+ {ActualCallee.getCallee()->getType()});
std::vector<Value *> Args = getStatepointArgs(
*Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);
@@ -820,9 +822,9 @@ static InvokeInst *CreateGCStatepointInvokeCommon(
const Twine &Name) {
Module *M = Builder->GetInsertBlock()->getParent()->getParent();
// Fill in the one generic type'd argument (the function is also vararg)
- Function *FnStatepoint =
- Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
- {ActualInvokee.getCallee()->getType()});
+ Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::experimental_gc_statepoint,
+ {ActualInvokee.getCallee()->getType()});
std::vector<Value *> Args =
getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
@@ -875,7 +877,7 @@ CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
Intrinsic::ID ID = Intrinsic::experimental_gc_result;
Module *M = BB->getParent()->getParent();
Type *Types[] = {ResultType};
- Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);
+ Function *FnGCResult = Intrinsic::getOrInsertDeclaration(M, ID, Types);
Value *Args[] = {Statepoint};
return CreateCall(FnGCResult, Args, {}, Name);
@@ -886,8 +888,8 @@ CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
Type *ResultType, const Twine &Name) {
Module *M = BB->getParent()->getParent();
Type *Types[] = {ResultType};
- Function *FnGCRelocate =
- Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
+ Function *FnGCRelocate = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::experimental_gc_relocate, Types);
Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
return CreateCall(FnGCRelocate, Args, {}, Name);
@@ -897,7 +899,7 @@ CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
const Twine &Name) {
Module *M = BB->getParent()->getParent();
Type *PtrTy = DerivedPtr->getType();
- Function *FnGCFindBase = Intrinsic::getDeclaration(
+ Function *FnGCFindBase = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
return CreateCall(FnGCFindBase, {DerivedPtr}, {}, Name);
}
@@ -906,7 +908,7 @@ CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
const Twine &Name) {
Module *M = BB->getParent()->getParent();
Type *PtrTy = DerivedPtr->getType();
- Function *FnGCGetOffset = Intrinsic::getDeclaration(
+ Function *FnGCGetOffset = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
return CreateCall(FnGCGetOffset, {DerivedPtr}, {}, Name);
}
@@ -915,7 +917,7 @@ CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
Instruction *FMFSource,
const Twine &Name) {
Module *M = BB->getModule();
- Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
+ Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {V->getType()});
return createCallHelper(Fn, {V}, Name, FMFSource);
}
@@ -923,7 +925,7 @@ Value *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
Value *RHS, Instruction *FMFSource,
const Twine &Name) {
Module *M = BB->getModule();
- Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
+ Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {LHS->getType()});
if (Value *V = Folder.FoldBinaryIntrinsic(ID, LHS, RHS, Fn->getReturnType(),
FMFSource))
return V;
@@ -936,7 +938,7 @@ CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
Instruction *FMFSource,
const Twine &Name) {
Module *M = BB->getModule();
- Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
+ Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, Types);
return createCallHelper(Fn, Args, Name, FMFSource);
}
@@ -963,7 +965,7 @@ CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
"Wrong types for intrinsic!");
// TODO: Handle varargs intrinsics.
- Function *Fn = Intrinsic::getDeclaration(M, ID, OverloadTys);
+ Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, OverloadTys);
return createCallHelper(Fn, Args, Name, FMFSource);
}
@@ -1120,7 +1122,7 @@ Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
"launder.invariant.group only applies to pointers.");
auto *PtrType = Ptr->getType();
Module *M = BB->getParent()->getParent();
- Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
+ Function *FnLaunderInvariantGroup = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::launder_invariant_group, {PtrType});
assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
@@ -1137,7 +1139,7 @@ Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
auto *PtrType = Ptr->getType();
Module *M = BB->getParent()->getParent();
- Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
+ Function *FnStripInvariantGroup = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::strip_invariant_group, {PtrType});
assert(FnStripInvariantGroup->getReturnType() == PtrType &&
@@ -1152,7 +1154,8 @@ Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
auto *Ty = cast<VectorType>(V->getType());
if (isa<ScalableVectorType>(Ty)) {
Module *M = BB->getParent()->getParent();
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::vector_reverse, Ty);
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_reverse, Ty);
return Insert(CallInst::Create(F, V), Name);
}
// Keep the original behaviour for fixed vector
@@ -1171,7 +1174,8 @@ Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
Module *M = BB->getParent()->getParent();
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::vector_splice, VTy);
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_splice, VTy);
Value *Ops[] = {V1, V2, getInt32(Imm)};
return Insert(CallInst::Create(F, Ops), Name);
@@ -1225,7 +1229,7 @@ Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);
Module *M = BB->getParent()->getParent();
- Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
+ Function *FnPreserveArrayAccessIndex = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
Value *DimV = getInt32(Dimension);
@@ -1246,7 +1250,7 @@ Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
auto *BaseType = Base->getType();
Module *M = BB->getParent()->getParent();
- Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
+ Function *FnPreserveUnionAccessIndex = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
Value *DIIndex = getInt32(FieldIndex);
@@ -1271,7 +1275,7 @@ Value *IRBuilderBase::CreatePreserveStructAccessIndex(
GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});
Module *M = BB->getParent()->getParent();
- Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
+ Function *FnPreserveStructAccessIndex = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
Value *DIIndex = getInt32(FieldIndex);
@@ -1288,8 +1292,8 @@ Value *IRBuilderBase::CreatePreserveStructAccessIndex(
Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) {
ConstantInt *TestV = getInt32(Test);
Module *M = BB->getParent()->getParent();
- Function *FnIsFPClass =
- Intrinsic::getDeclaration(M, Intrinsic::is_fpclass, {FPNum->getType()});
+ Function *FnIsFPClass = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::is_fpclass, {FPNum->getType()});
return CreateCall(FnIsFPClass, {FPNum, TestV});
}
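
Where a site only needs the resulting call and not the Function* itself, IRBuilderBase::CreateIntrinsic (whose internals are updated above) resolves the declaration and emits the call in one step. A hedged sketch with a hypothetical helper name, not taken from the patch:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    using namespace llvm;

    // Hypothetical helper: emit llvm.fma.* for A, B, C of the same FP type.
    static Value *emitFMA(IRBuilderBase &Builder, Value *A, Value *B, Value *C) {
      // CreateIntrinsic looks up the declaration internally, so the call site
      // never spells getOrInsertDeclaration at all.
      return Builder.CreateIntrinsic(Intrinsic::fma, {A->getType()}, {A, B, C});
    }
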
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index 0a6c93fde6302f..002bab8e079e50 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -629,9 +629,8 @@ bool VPIntrinsic::canIgnoreVectorLengthParam() const {
return false;
}
-Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
- Type *ReturnType,
- ArrayRef<Value *> Params) {
+Function *VPIntrinsic::getOrInsertDeclarationForParams(
+ Module *M, Intrinsic::ID VPID, Type *ReturnType, ArrayRef<Value *> Params) {
assert(isVPIntrinsic(VPID) && "not a VP intrinsic");
Function *VPFunc;
switch (VPID) {
@@ -641,7 +640,7 @@ Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
OverloadTy =
Params[*VPReductionIntrinsic::getVectorParamPos(VPID)]->getType();
- VPFunc = Intrinsic::getDeclaration(M, VPID, OverloadTy);
+ VPFunc = Intrinsic::getOrInsertDeclaration(M, VPID, OverloadTy);
break;
}
case Intrinsic::vp_trunc:
@@ -658,43 +657,43 @@ Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
case Intrinsic::vp_lrint:
case Intrinsic::vp_llrint:
case Intrinsic::vp_cttz_elts:
- VPFunc =
- Intrinsic::getDeclaration(M, VPID, {ReturnType, Params[0]->getType()});
+ VPFunc = Intrinsic::getOrInsertDeclaration(
+ M, VPID, {ReturnType, Params[0]->getType()});
break;
case Intrinsic::vp_is_fpclass:
- VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[0]->getType()});
+ VPFunc = Intrinsic::getOrInsertDeclaration(M, VPID, {Params[0]->getType()});
break;
case Intrinsic::vp_merge:
case Intrinsic::vp_select:
- VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[1]->getType()});
+ VPFunc = Intrinsic::getOrInsertDeclaration(M, VPID, {Params[1]->getType()});
break;
case Intrinsic::vp_load:
- VPFunc = Intrinsic::getDeclaration(
+ VPFunc = Intrinsic::getOrInsertDeclaration(
M, VPID, {ReturnType, Params[0]->getType()});
break;
case Intrinsic::experimental_vp_strided_load:
- VPFunc = Intrinsic::getDeclaration(
+ VPFunc = Intrinsic::getOrInsertDeclaration(
M, VPID, {ReturnType, Params[0]->getType(), Params[1]->getType()});
break;
case Intrinsic::vp_gather:
- VPFunc = Intrinsic::getDeclaration(
+ VPFunc = Intrinsic::getOrInsertDeclaration(
M, VPID, {ReturnType, Params[0]->getType()});
break;
case Intrinsic::vp_store:
- VPFunc = Intrinsic::getDeclaration(
+ VPFunc = Intrinsic::getOrInsertDeclaration(
M, VPID, {Params[0]->getType(), Params[1]->getType()});
break;
case Intrinsic::experimental_vp_strided_store:
- VPFunc = Intrinsic::getDeclaration(
+ VPFunc = Intrinsic::getOrInsertDeclaration(
M, VPID,
{Params[0]->getType(), Params[1]->getType(), Params[2]->getType()});
break;
case Intrinsic::vp_scatter:
- VPFunc = Intrinsic::getDeclaration(
+ VPFunc = Intrinsic::getOrInsertDeclaration(
M, VPID, {Params[0]->getType(), Params[1]->getType()});
break;
case Intrinsic::experimental_vp_splat:
- VPFunc = Intrinsic::getDeclaration(M, VPID, ReturnType);
+ VPFunc = Intrinsic::getOrInsertDeclaration(M, VPID, ReturnType);
break;
}
assert(VPFunc && "Could not declare VP intrinsic");
diff --git a/llvm/lib/IR/Intrinsics.cpp b/llvm/lib/IR/Intrinsics.cpp
index ef26b1926b9767..ff8b4b7a020c2f 100644
--- a/llvm/lib/IR/Intrinsics.cpp
+++ b/llvm/lib/IR/Intrinsics.cpp
@@ -713,7 +713,8 @@ Intrinsic::ID Intrinsic::lookupIntrinsicID(StringRef Name) {
#include "llvm/IR/IntrinsicImpl.inc"
#undef GET_INTRINSIC_ATTRIBUTES
-Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type *> Tys) {
+Function *Intrinsic::getOrInsertDeclaration(Module *M, ID id,
+ ArrayRef<Type *> Tys) {
// There can never be multiple globals with the same name of different types,
// because intrinsics must be a specific type.
auto *FT = getType(M->getContext(), id, Tys);
@@ -1078,7 +1079,7 @@ std::optional<Function *> Intrinsic::remangleIntrinsicFunction(Function *F) {
// invalid and we'll get an error.
ExistingGV->setName(WantedName + ".renamed");
}
- return Intrinsic::getDeclaration(F->getParent(), ID, ArgTys);
+ return Intrinsic::getOrInsertDeclaration(F->getParent(), ID, ArgTys);
}();
NewDecl->setCallingConv(F->getCallingConv());
diff --git a/llvm/lib/IR/Module.cpp b/llvm/lib/IR/Module.cpp
index 704bc8d339bc57..ab48d3e4101b72 100644
--- a/llvm/lib/IR/Module.cpp
+++ b/llvm/lib/IR/Module.cpp
@@ -89,21 +89,22 @@ Module::~Module() {
void Module::removeDebugIntrinsicDeclarations() {
auto *DeclareIntrinsicFn =
- Intrinsic::getDeclaration(this, Intrinsic::dbg_declare);
+ Intrinsic::getOrInsertDeclaration(this, Intrinsic::dbg_declare);
assert((!isMaterialized() || DeclareIntrinsicFn->hasZeroLiveUses()) &&
"Debug declare intrinsic should have had uses removed.");
DeclareIntrinsicFn->eraseFromParent();
auto *ValueIntrinsicFn =
- Intrinsic::getDeclaration(this, Intrinsic::dbg_value);
+ Intrinsic::getOrInsertDeclaration(this, Intrinsic::dbg_value);
assert((!isMaterialized() || ValueIntrinsicFn->hasZeroLiveUses()) &&
"Debug value intrinsic should have had uses removed.");
ValueIntrinsicFn->eraseFromParent();
auto *AssignIntrinsicFn =
- Intrinsic::getDeclaration(this, Intrinsic::dbg_assign);
+ Intrinsic::getOrInsertDeclaration(this, Intrinsic::dbg_assign);
assert((!isMaterialized() || AssignIntrinsicFn->hasZeroLiveUses()) &&
"Debug assign intrinsic should have had uses removed.");
AssignIntrinsicFn->eraseFromParent();
- auto *LabelntrinsicFn = Intrinsic::getDeclaration(this, Intrinsic::dbg_label);
+ auto *LabelntrinsicFn =
+ Intrinsic::getOrInsertDeclaration(this, Intrinsic::dbg_label);
assert((!isMaterialized() || LabelntrinsicFn->hasZeroLiveUses()) &&
"Debug label intrinsic should have had uses removed.");
LabelntrinsicFn->eraseFromParent();
diff --git a/llvm/lib/IR/VectorBuilder.cpp b/llvm/lib/IR/VectorBuilder.cpp
index f42948ba89042f..737f49b1334d76 100644
--- a/llvm/lib/IR/VectorBuilder.cpp
+++ b/llvm/lib/IR/VectorBuilder.cpp
@@ -108,8 +108,8 @@ Value *VectorBuilder::createVectorInstructionImpl(Intrinsic::ID VPID,
if (VLenPosOpt)
IntrinParams[*VLenPosOpt] = &requestEVL();
- auto *VPDecl = VPIntrinsic::getDeclarationForParams(&getModule(), VPID,
- ReturnTy, IntrinParams);
+ auto *VPDecl = VPIntrinsic::getOrInsertDeclarationForParams(
+ &getModule(), VPID, ReturnTy, IntrinParams);
return Builder.CreateCall(VPDecl, IntrinParams, Name);
}
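
The VectorBuilder hunk above goes through the renamed VP helper. As a hedged illustration only (hypothetical helper name; operand order lhs, rhs, mask, evl as defined for llvm.vp.fadd), the same helper can be called directly:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    // Hypothetical helper: build a llvm.vp.fadd call for two vector operands.
    static Value *emitVPFAdd(IRBuilderBase &Builder, Value *LHS, Value *RHS,
                             Value *Mask, Value *EVL) {
      Module *M = Builder.GetInsertBlock()->getModule();
      Value *Params[] = {LHS, RHS, Mask, EVL};
      // The renamed helper derives the overload types from ReturnType/Params.
      Function *VPFAdd = VPIntrinsic::getOrInsertDeclarationForParams(
          M, Intrinsic::vp_fadd, LHS->getType(), Params);
      return Builder.CreateCall(VPFAdd, Params);
    }
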
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 8a217cd1ec5cf9..ae96e277b5fc69 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16454,8 +16454,8 @@ static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {
Builder.CreateShuffleVector(TI->getOperand(0), ShuffleLanes), VecTy));
if (Parts.size() == 4) {
- auto *F = Intrinsic::getDeclaration(TI->getModule(),
- Intrinsic::aarch64_neon_tbl4, VecTy);
+ auto *F = Intrinsic::getOrInsertDeclaration(
+ TI->getModule(), Intrinsic::aarch64_neon_tbl4, VecTy);
Parts.push_back(ConstantVector::get(MaskConst));
Results.push_back(Builder.CreateCall(F, Parts));
Parts.clear();
@@ -16484,7 +16484,7 @@ static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {
break;
}
- auto *F = Intrinsic::getDeclaration(TI->getModule(), TblID, VecTy);
+ auto *F = Intrinsic::getOrInsertDeclaration(TI->getModule(), TblID, VecTy);
Parts.push_back(ConstantVector::get(MaskConst));
Results.push_back(Builder.CreateCall(F, Parts));
}
@@ -16765,9 +16765,10 @@ static Function *getStructuredLoadFunction(Module *M, unsigned Factor,
Intrinsic::aarch64_neon_ld3,
Intrinsic::aarch64_neon_ld4};
if (Scalable)
- return Intrinsic::getDeclaration(M, SVELoads[Factor - 2], {LDVTy});
+ return Intrinsic::getOrInsertDeclaration(M, SVELoads[Factor - 2], {LDVTy});
- return Intrinsic::getDeclaration(M, NEONLoads[Factor - 2], {LDVTy, PtrTy});
+ return Intrinsic::getOrInsertDeclaration(M, NEONLoads[Factor - 2],
+ {LDVTy, PtrTy});
}
static Function *getStructuredStoreFunction(Module *M, unsigned Factor,
@@ -16781,9 +16782,10 @@ static Function *getStructuredStoreFunction(Module *M, unsigned Factor,
Intrinsic::aarch64_neon_st3,
Intrinsic::aarch64_neon_st4};
if (Scalable)
- return Intrinsic::getDeclaration(M, SVEStores[Factor - 2], {STVTy});
+ return Intrinsic::getOrInsertDeclaration(M, SVEStores[Factor - 2], {STVTy});
- return Intrinsic::getDeclaration(M, NEONStores[Factor - 2], {STVTy, PtrTy});
+ return Intrinsic::getOrInsertDeclaration(M, NEONStores[Factor - 2],
+ {STVTy, PtrTy});
}
/// Lower an interleaved load into a ldN intrinsic.
@@ -27247,7 +27249,7 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
if (ValueTy->getPrimitiveSizeInBits() == 128) {
Intrinsic::ID Int =
IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
- Function *Ldxr = Intrinsic::getDeclaration(M, Int);
+ Function *Ldxr = Intrinsic::getOrInsertDeclaration(M, Int);
Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
@@ -27266,7 +27268,7 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
Type *Tys[] = { Addr->getType() };
Intrinsic::ID Int =
IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
- Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys);
+ Function *Ldxr = Intrinsic::getOrInsertDeclaration(M, Int, Tys);
const DataLayout &DL = M->getDataLayout();
IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy));
@@ -27281,7 +27283,8 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
IRBuilderBase &Builder) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex));
+ Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::aarch64_clrex));
}
Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
@@ -27296,7 +27299,7 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
if (Val->getType()->getPrimitiveSizeInBits() == 128) {
Intrinsic::ID Int =
IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
- Function *Stxr = Intrinsic::getDeclaration(M, Int);
+ Function *Stxr = Intrinsic::getOrInsertDeclaration(M, Int);
Type *Int64Ty = Type::getInt64Ty(M->getContext());
Type *Int128Ty = Type::getInt128Ty(M->getContext());
@@ -27311,7 +27314,7 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
Intrinsic::ID Int =
IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
Type *Tys[] = { Addr->getType() };
- Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys);
+ Function *Stxr = Intrinsic::getOrInsertDeclaration(M, Int, Tys);
const DataLayout &DL = M->getDataLayout();
IntegerType *IntValTy = Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType()));
@@ -27348,7 +27351,7 @@ bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) {
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
Function *ThreadPointerFunc =
- Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::thread_pointer);
return IRB.CreatePointerCast(
IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc),
Offset),
diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
index e62437c28b863f..fe96fedcfb82dc 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
@@ -436,10 +436,10 @@ Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
Value *Ptr, uint64_t Size) {
- auto SetTagZeroFunc =
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag_zero);
- auto StgpFunc =
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_stgp);
+ auto SetTagZeroFunc = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::aarch64_settag_zero);
+ auto StgpFunc = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::aarch64_stgp);
InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
bool LittleEndian =
@@ -481,8 +481,8 @@ Instruction *AArch64StackTagging::insertBaseTaggedPointer(
assert(PrologueBB);
IRBuilder<> IRB(&PrologueBB->front());
- Function *IRG_SP =
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_irg_sp);
+ Function *IRG_SP = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), Intrinsic::aarch64_irg_sp);
Instruction *Base =
IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
Base->setName("basetag");
@@ -563,8 +563,8 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
LI = DeleteLI.get();
}
- SetTagFunc =
- Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);
+ SetTagFunc = Intrinsic::getOrInsertDeclaration(F->getParent(),
+ Intrinsic::aarch64_settag);
Instruction *Base =
insertBaseTaggedPointer(*Fn.getParent(), SInfo.AllocasToInstrument, DT);
@@ -580,7 +580,7 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
NextTag = (NextTag + 1) % 16;
// Replace alloca with tagp(alloca).
IRBuilder<> IRB(Info.AI->getNextNode());
- Function *TagP = Intrinsic::getDeclaration(
+ Function *TagP = Intrinsic::getOrInsertDeclaration(
F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
Instruction *TagPCall =
IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 7b74bb2a03a642..91ab3fcfc4c70e 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1637,7 +1637,7 @@ static std::optional<Instruction *> instCombineSVEAllActive(IntrinsicInst &II,
return std::nullopt;
auto *Mod = II.getModule();
- auto *NewDecl = Intrinsic::getDeclaration(Mod, IID, {II.getType()});
+ auto *NewDecl = Intrinsic::getOrInsertDeclaration(Mod, IID, {II.getType()});
II.setCalledFunction(NewDecl);
return &II;
diff --git a/llvm/lib/Target/AArch64/SMEABIPass.cpp b/llvm/lib/Target/AArch64/SMEABIPass.cpp
index 174d95333d918d..2ee16a873e33b8 100644
--- a/llvm/lib/Target/AArch64/SMEABIPass.cpp
+++ b/llvm/lib/Target/AArch64/SMEABIPass.cpp
@@ -71,7 +71,7 @@ void emitTPIDR2Save(Module *M, IRBuilder<> &Builder) {
// A save to TPIDR2 should be followed by clearing TPIDR2_EL0.
Function *WriteIntr =
- Intrinsic::getDeclaration(M, Intrinsic::aarch64_sme_set_tpidr2);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::aarch64_sme_set_tpidr2);
Builder.CreateCall(WriteIntr->getFunctionType(), WriteIntr,
Builder.getInt64(0));
}
@@ -114,7 +114,7 @@ bool SMEABI::updateNewStateFunctions(Module *M, Function *F,
// Read TPIDR2_EL0 in PreludeBB & branch to SaveBB if not 0.
Builder.SetInsertPoint(PreludeBB);
Function *TPIDR2Intr =
- Intrinsic::getDeclaration(M, Intrinsic::aarch64_sme_get_tpidr2);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::aarch64_sme_get_tpidr2);
auto *TPIDR2 = Builder.CreateCall(TPIDR2Intr->getFunctionType(), TPIDR2Intr,
{}, "tpidr2");
auto *Cmp = Builder.CreateCmp(ICmpInst::ICMP_NE, TPIDR2,
@@ -128,20 +128,20 @@ bool SMEABI::updateNewStateFunctions(Module *M, Function *F,
// Enable pstate.za at the start of the function.
Builder.SetInsertPoint(&OrigBB->front());
Function *EnableZAIntr =
- Intrinsic::getDeclaration(M, Intrinsic::aarch64_sme_za_enable);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::aarch64_sme_za_enable);
Builder.CreateCall(EnableZAIntr->getFunctionType(), EnableZAIntr);
}
if (FnAttrs.isNewZA()) {
Function *ZeroIntr =
- Intrinsic::getDeclaration(M, Intrinsic::aarch64_sme_zero);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::aarch64_sme_zero);
Builder.CreateCall(ZeroIntr->getFunctionType(), ZeroIntr,
Builder.getInt32(0xff));
}
if (FnAttrs.isNewZT0()) {
Function *ClearZT0Intr =
- Intrinsic::getDeclaration(M, Intrinsic::aarch64_sme_zero_zt);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::aarch64_sme_zero_zt);
Builder.CreateCall(ClearZT0Intr->getFunctionType(), ClearZT0Intr,
{Builder.getInt32(0)});
}
@@ -153,8 +153,8 @@ bool SMEABI::updateNewStateFunctions(Module *M, Function *F,
if (!T || !isa<ReturnInst>(T))
continue;
Builder.SetInsertPoint(T);
- Function *DisableZAIntr =
- Intrinsic::getDeclaration(M, Intrinsic::aarch64_sme_za_disable);
+ Function *DisableZAIntr = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::aarch64_sme_za_disable);
Builder.CreateCall(DisableZAIntr->getFunctionType(), DisableZAIntr);
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index f408a013d7a379..ea88ed424dc597 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -407,8 +407,8 @@ Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
Value *const Identity) const {
Type *AtomicTy = V->getType();
Module *M = B.GetInsertBlock()->getModule();
- Function *UpdateDPP =
- Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);
+ Function *UpdateDPP = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::amdgcn_update_dpp, AtomicTy);
// Reduce within each row of 16 lanes.
for (unsigned Idx = 0; Idx < 4; Idx++) {
@@ -439,8 +439,8 @@ Value *AMDGPUAtomicOptimizerImpl::buildReduction(IRBuilder<> &B,
// Pick an arbitrary lane from 0..31 and an arbitrary lane from 32..63 and
// combine them with a scalar operation.
- Function *ReadLane =
- Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, AtomicTy);
+ Function *ReadLane = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::amdgcn_readlane, AtomicTy);
Value *Lane0 = B.CreateCall(ReadLane, {V, B.getInt32(0)});
Value *Lane32 = B.CreateCall(ReadLane, {V, B.getInt32(32)});
return buildNonAtomicBinOp(B, Op, Lane0, Lane32);
@@ -453,8 +453,8 @@ Value *AMDGPUAtomicOptimizerImpl::buildScan(IRBuilder<> &B,
Value *Identity) const {
Type *AtomicTy = V->getType();
Module *M = B.GetInsertBlock()->getModule();
- Function *UpdateDPP =
- Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);
+ Function *UpdateDPP = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::amdgcn_update_dpp, AtomicTy);
for (unsigned Idx = 0; Idx < 4; Idx++) {
V = buildNonAtomicBinOp(
@@ -513,18 +513,18 @@ Value *AMDGPUAtomicOptimizerImpl::buildShiftRight(IRBuilder<> &B, Value *V,
Value *Identity) const {
Type *AtomicTy = V->getType();
Module *M = B.GetInsertBlock()->getModule();
- Function *UpdateDPP =
- Intrinsic::getDeclaration(M, Intrinsic::amdgcn_update_dpp, AtomicTy);
+ Function *UpdateDPP = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::amdgcn_update_dpp, AtomicTy);
if (ST->hasDPPWavefrontShifts()) {
// GFX9 has DPP wavefront shift operations.
V = B.CreateCall(UpdateDPP,
{Identity, V, B.getInt32(DPP::WAVE_SHR1), B.getInt32(0xf),
B.getInt32(0xf), B.getFalse()});
} else {
- Function *ReadLane =
- Intrinsic::getDeclaration(M, Intrinsic::amdgcn_readlane, AtomicTy);
- Function *WriteLane =
- Intrinsic::getDeclaration(M, Intrinsic::amdgcn_writelane, AtomicTy);
+ Function *ReadLane = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::amdgcn_readlane, AtomicTy);
+ Function *WriteLane = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::amdgcn_writelane, AtomicTy);
// On GFX10 all DPP operations are confined to a single row. To get cross-
// row operations we have to use permlane or readlane.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 052e1140533f3f..7d3164c79089e0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -119,8 +119,8 @@ class AMDGPUCodeGenPrepareImpl
return SqrtF32;
LLVMContext &Ctx = Mod->getContext();
- SqrtF32 = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_sqrt,
- {Type::getFloatTy(Ctx)});
+ SqrtF32 = Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::amdgcn_sqrt,
+ {Type::getFloatTy(Ctx)});
return SqrtF32;
}
@@ -129,7 +129,7 @@ class AMDGPUCodeGenPrepareImpl
return LdexpF32;
LLVMContext &Ctx = Mod->getContext();
- LdexpF32 = Intrinsic::getDeclaration(
+ LdexpF32 = Intrinsic::getOrInsertDeclaration(
Mod, Intrinsic::ldexp, {Type::getFloatTy(Ctx), Type::getInt32Ty(Ctx)});
return LdexpF32;
}
@@ -577,7 +577,7 @@ bool AMDGPUCodeGenPrepareImpl::promoteUniformBitreverseToI32(
Type *I32Ty = getI32Ty(Builder, I.getType());
Function *I32 =
- Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
+ Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::bitreverse, {I32Ty});
Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
Value *LShrOp =
@@ -1260,8 +1260,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
Value *FB = IsSigned ? Builder.CreateSIToFP(IB,F32Ty)
: Builder.CreateUIToFP(IB,F32Ty);
- Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
- Builder.getFloatTy());
+ Function *RcpDecl = Intrinsic::getOrInsertDeclaration(
+ Mod, Intrinsic::amdgcn_rcp, Builder.getFloatTy());
Value *RCP = Builder.CreateCall(RcpDecl, { FB });
Value *FQM = Builder.CreateFMul(FA, RCP);
@@ -1455,7 +1455,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
// Initial estimate of inv(y).
Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
- Function *Rcp = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
+ Function *Rcp =
+ Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
Value *RcpY = Builder.CreateCall(Rcp, {FloatY});
Constant *Scale = ConstantFP::get(F32Ty, llvm::bit_cast<float>(0x4F7FFFFE));
Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp
index 45207c06a788a2..e48fed025857fa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUImageIntrinsicOptimizer.cpp
@@ -237,7 +237,7 @@ bool optimizeSection(ArrayRef<SmallVector<IntrinsicInst *, 4>> MergeableInsts) {
else
NewIntrinID = Intrinsic::amdgcn_image_msaa_load_2darraymsaa;
- Function *NewIntrin = Intrinsic::getDeclaration(
+ Function *NewIntrin = Intrinsic::getOrInsertDeclaration(
IIList.front()->getModule(), NewIntrinID, OverloadTys);
Args[ImageDimIntr->DMaskIndex] =
ConstantInt::get(DMask->getType(), NewMaskVal);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index ecb4d4fa5d5c39..6a5a48778197e4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -130,7 +130,8 @@ static std::optional<Instruction *> modifyIntrinsicCall(
// Modify arguments and types
Func(Args, ArgTys);
- Function *I = Intrinsic::getDeclaration(OldIntr.getModule(), NewIntr, ArgTys);
+ Function *I =
+ Intrinsic::getOrInsertDeclaration(OldIntr.getModule(), NewIntr, ArgTys);
CallInst *NewCall = IC.Builder.CreateCall(I, Args);
NewCall->takeName(&OldIntr);
@@ -502,7 +503,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
if (IID == Intrinsic::sqrt && !canContractSqrtToRsq(SqrtOp))
break;
- Function *NewDecl = Intrinsic::getDeclaration(
+ Function *NewDecl = Intrinsic::getOrInsertDeclaration(
SrcCI->getModule(), Intrinsic::amdgcn_rsq, {SrcCI->getType()});
InnerFMF |= FMF;
@@ -527,7 +528,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
// f16 amdgcn.sqrt is identical to regular sqrt.
if (IID == Intrinsic::amdgcn_sqrt && Src->getType()->isHalfTy()) {
- Function *NewDecl = Intrinsic::getDeclaration(
+ Function *NewDecl = Intrinsic::getOrInsertDeclaration(
II.getModule(), Intrinsic::sqrt, {II.getType()});
II.setCalledFunction(NewDecl);
return &II;
@@ -614,7 +615,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
Value *Src1 = II.getArgOperand(1);
const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
if (CMask) {
- II.setCalledOperand(Intrinsic::getDeclaration(
+ II.setCalledOperand(Intrinsic::getOrInsertDeclaration(
II.getModule(), Intrinsic::is_fpclass, Src0->getType()));
// Clamp any excess bits, as they're illegal for the generic intrinsic.
@@ -890,7 +891,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
// register (which contains the bitmask of live threads). So a
// comparison that always returns true is the same as a read of the
// EXEC register.
- Function *NewF = Intrinsic::getDeclaration(
+ Function *NewF = Intrinsic::getOrInsertDeclaration(
II.getModule(), Intrinsic::read_register, II.getType());
Metadata *MDArgs[] = {MDString::get(II.getContext(), "exec")};
MDNode *MD = MDNode::get(II.getContext(), MDArgs);
@@ -989,7 +990,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
} else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
break;
- Function *NewF = Intrinsic::getDeclaration(
+ Function *NewF = Intrinsic::getOrInsertDeclaration(
II.getModule(), NewIID, {II.getType(), SrcLHS->getType()});
Value *Args[] = {SrcLHS, SrcRHS,
ConstantInt::get(CC->getType(), SrcPred)};
@@ -1205,7 +1206,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
// If we can prove we don't have one of the special cases then we can use a
// normal fma instead.
if (canSimplifyLegacyMulToMul(II, Op0, Op1, IC)) {
- II.setCalledOperand(Intrinsic::getDeclaration(
+ II.setCalledOperand(Intrinsic::getOrInsertDeclaration(
II.getModule(), Intrinsic::fma, II.getType()));
return &II;
}
@@ -1401,7 +1402,7 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
Args[0] = IC.Builder.CreateShuffleVector(II.getOperand(0), EltMask);
}
- Function *NewIntrin = Intrinsic::getDeclaration(
+ Function *NewIntrin = Intrinsic::getOrInsertDeclaration(
II.getModule(), II.getIntrinsicID(), OverloadTys);
CallInst *NewCall = IC.Builder.CreateCall(NewIntrin, Args);
NewCall->takeName(&II);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 53628981e12409..800bdbe04cf70d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1555,8 +1555,8 @@ bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
MIB.addImm(MFI->getLDSSize());
} else {
Module *M = MF->getFunction().getParent();
- const GlobalValue *GV
- = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
+ const GlobalValue *GV =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
}
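
For downstream code picking up the same rename, the call sites above already show the full pattern; here is a minimal sketch, assuming the usual LLVM headers and an IRBuilder that already has an insertion point. The helper name emitExampleCalls is made up for illustration and is not part of this patch.

  // Sketch only, not part of this commit. Shows the renamed API for both a
  // non-overloaded and an overloaded intrinsic; emitExampleCalls is a
  // hypothetical helper.
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/Module.h"

  using namespace llvm;

  static void emitExampleCalls(IRBuilder<> &Builder) {
    Module *M = Builder.GetInsertBlock()->getModule();

    // Non-overloaded intrinsic: no overload-type list is needed.
    Function *Trap =
        Intrinsic::getOrInsertDeclaration(M, Intrinsic::debugtrap);
    Builder.CreateCall(Trap, {});

    // Overloaded intrinsic: the overload types go in the trailing list;
    // here we pick the i32 flavor of llvm.ctpop.
    Type *Int32Ty = Builder.getInt32Ty();
    Function *Ctpop =
        Intrinsic::getOrInsertDeclaration(M, Intrinsic::ctpop, Int32Ty);
    Builder.CreateCall(Ctpop, {Builder.getInt32(42)});
  }
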
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index e01c9dc66a3f1f..eb553ae4eb80ff 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -753,7 +753,7 @@ bool AMDGPULibCalls::fold(CallInst *CI) {
CI->setArgOperand(1, SplatArg1);
}
- CI->setCalledFunction(Intrinsic::getDeclaration(
+ CI->setCalledFunction(Intrinsic::getOrInsertDeclaration(
CI->getModule(), Intrinsic::ldexp,
{CI->getType(), CI->getArgOperand(1)->getType()}));
return true;
@@ -1034,7 +1034,8 @@ bool AMDGPULibCalls::fold_pow(FPMathOperator *FPOp, IRBuilder<> &B,
// pown/pow ---> powr(fabs(x), y) | (x & ((int)y << 31))
FunctionCallee ExpExpr;
if (ShouldUseIntrinsic)
- ExpExpr = Intrinsic::getDeclaration(M, Intrinsic::exp2, {FPOp->getType()});
+ ExpExpr = Intrinsic::getOrInsertDeclaration(M, Intrinsic::exp2,
+ {FPOp->getType()});
else {
ExpExpr = getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_EXP2, FInfo));
if (!ExpExpr)
@@ -1108,8 +1109,8 @@ bool AMDGPULibCalls::fold_pow(FPMathOperator *FPOp, IRBuilder<> &B,
if (needlog) {
FunctionCallee LogExpr;
if (ShouldUseIntrinsic) {
- LogExpr =
- Intrinsic::getDeclaration(M, Intrinsic::log2, {FPOp->getType()});
+ LogExpr = Intrinsic::getOrInsertDeclaration(M, Intrinsic::log2,
+ {FPOp->getType()});
} else {
LogExpr = getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_LOG2, FInfo));
if (!LogExpr)
@@ -1298,8 +1299,8 @@ void AMDGPULibCalls::replaceLibCallWithSimpleIntrinsic(IRBuilder<> &B,
}
}
- CI->setCalledFunction(
- Intrinsic::getDeclaration(CI->getModule(), IntrID, {CI->getType()}));
+ CI->setCalledFunction(Intrinsic::getOrInsertDeclaration(
+ CI->getModule(), IntrID, {CI->getType()}));
}
bool AMDGPULibCalls::tryReplaceLibcallWithSimpleIntrinsic(
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index 51a5b7702c0093..ff5eb81490106f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -285,8 +285,8 @@ class AMDGPULowerModuleLDS {
BasicBlock *Entry = &Func->getEntryBlock();
IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
- Function *Decl =
- Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});
+ Function *Decl = Intrinsic::getOrInsertDeclaration(
+ Func->getParent(), Intrinsic::donothing, {});
Value *UseInstance[1] = {
Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};
@@ -529,8 +529,8 @@ class AMDGPULowerModuleLDS {
// block to spare deduplicating it later.
auto [It, Inserted] = tableKernelIndexCache.try_emplace(F);
if (Inserted) {
- Function *Decl =
- Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});
+ Function *Decl = Intrinsic::getOrInsertDeclaration(
+ &M, Intrinsic::amdgcn_lds_kernel_id, {});
auto InsertAt = F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
IRBuilder<> Builder(&*InsertAt);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 24bfbff41ec5c0..63da3443479be3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -973,10 +973,10 @@ AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
if (!IsAMDHSA) {
- Function *LocalSizeYFn =
- Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
- Function *LocalSizeZFn =
- Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);
+ Function *LocalSizeYFn = Intrinsic::getOrInsertDeclaration(
+ Mod, Intrinsic::r600_read_local_size_y);
+ Function *LocalSizeZFn = Intrinsic::getOrInsertDeclaration(
+ Mod, Intrinsic::r600_read_local_size_z);
CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});
@@ -1022,7 +1022,7 @@ AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
// } hsa_kernel_dispatch_packet_t
//
Function *DispatchPtrFn =
- Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
+ Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
DispatchPtr->addRetAttr(Attribute::NoAlias);
@@ -1082,7 +1082,7 @@ Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
llvm_unreachable("invalid dimension");
}
- Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
+ Function *WorkitemIdFn = Intrinsic::getOrInsertDeclaration(Mod, IntrID);
CallInst *CI = Builder.CreateCall(WorkitemIdFn);
ST.makeLIDRangeMetadata(CI);
F->removeFnAttr(AttrName);
@@ -1564,7 +1564,7 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
continue;
case Intrinsic::objectsize: {
Value *Src = Intr->getOperand(0);
- Function *ObjectSize = Intrinsic::getDeclaration(
+ Function *ObjectSize = Intrinsic::getOrInsertDeclaration(
Mod, Intrinsic::objectsize,
{Intr->getType(),
PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS)});
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
index 4669bb45473cb0..cfce56f0bfe968 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSwLowerLDS.cpp
@@ -336,8 +336,8 @@ static void markUsedByKernel(Function *Func, GlobalVariable *SGV) {
BasicBlock *Entry = &Func->getEntryBlock();
IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
- Function *Decl =
- Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});
+ Function *Decl = Intrinsic::getOrInsertDeclaration(Func->getParent(),
+ Intrinsic::donothing, {});
Value *UseInstance[1] = {
Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};
@@ -922,7 +922,8 @@ void AMDGPUSwLowerLDS::lowerKernelLDSAccesses(Function *Func,
StringRef("__asan_free_impl"),
FunctionType::get(IRB.getVoidTy(), {Int64Ty, Int64Ty}, false));
Value *ReturnAddr = IRB.CreateCall(
- Intrinsic::getDeclaration(&M, Intrinsic::returnaddress), IRB.getInt32(0));
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::returnaddress),
+ IRB.getInt32(0));
Value *RAPToInt = IRB.CreatePtrToInt(ReturnAddr, Int64Ty);
Value *MallocPtrToInt = IRB.CreatePtrToInt(LoadMallocPtr, Int64Ty);
IRB.CreateCall(AsanFreeFunc, {MallocPtrToInt, RAPToInt});
@@ -1055,8 +1056,8 @@ void AMDGPUSwLowerLDS::lowerNonKernelLDSAccesses(
SetVector<Instruction *> LDSInstructions;
getLDSMemoryInstructions(Func, LDSInstructions);
- Function *Decl =
- Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});
+ Function *Decl = Intrinsic::getOrInsertDeclaration(
+ &M, Intrinsic::amdgcn_lds_kernel_id, {});
auto *KernelId = IRB.CreateCall(Decl, {});
GlobalVariable *LDSBaseTable = NKLDSParams.LDSBaseTable;
GlobalVariable *LDSOffsetTable = NKLDSParams.LDSOffsetTable;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index d701bf037fdfa6..5d7ca89571b27b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -1112,8 +1112,8 @@ Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
if (!AMDGPU::isExtendedGlobalAddrSpace(NewAS))
return nullptr;
Module *M = II->getModule();
- Function *NewDecl = Intrinsic::getDeclaration(M, II->getIntrinsicID(),
- {DestTy, SrcTy, DestTy});
+ Function *NewDecl = Intrinsic::getOrInsertDeclaration(
+ M, II->getIntrinsicID(), {DestTy, SrcTy, DestTy});
II->setArgOperand(0, NewV);
II->setCalledFunction(NewDecl);
return II;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
index 3758c768b8673f..59cc61e347bc0a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp
@@ -295,8 +295,8 @@ bool AMDGPUUnifyDivergentExitNodesImpl::run(Function &F, DominatorTree *DT,
// Remove and delete the unreachable inst.
UnreachableBlock->getTerminator()->eraseFromParent();
- Function *UnreachableIntrin =
- Intrinsic::getDeclaration(F.getParent(), Intrinsic::amdgcn_unreachable);
+ Function *UnreachableIntrin = Intrinsic::getOrInsertDeclaration(
+ F.getParent(), Intrinsic::amdgcn_unreachable);
// Insert a call to an intrinsic tracking that this is an unreachable
// point, in case we want to kill the active lanes or something later.
diff --git a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index edd881c84078c6..a7f2b66e3cd116 100644
--- a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -117,13 +117,15 @@ void SIAnnotateControlFlow::initialize(Module &M, const GCNSubtarget &ST) {
BoolUndef = PoisonValue::get(Boolean);
IntMaskZero = ConstantInt::get(IntMask, 0);
- If = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_if, { IntMask });
- Else = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_else,
- { IntMask, IntMask });
- IfBreak = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_if_break,
- { IntMask });
- Loop = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_loop, { IntMask });
- EndCf = Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_end_cf, { IntMask });
+ If = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::amdgcn_if, {IntMask});
+ Else = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::amdgcn_else,
+ {IntMask, IntMask});
+ IfBreak = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::amdgcn_if_break,
+ {IntMask});
+ Loop =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::amdgcn_loop, {IntMask});
+ EndCf = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::amdgcn_end_cf,
+ {IntMask});
}
/// Is the branch condition uniform or did the StructurizeCFG pass
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index bf757edfa85890..a35582bebb08a3 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21149,7 +21149,7 @@ Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder,
// Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
// here.
if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
- Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
+ Function *MCR = Intrinsic::getOrInsertDeclaration(M, Intrinsic::arm_mcr);
Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
Builder.getInt32(0), Builder.getInt32(7),
Builder.getInt32(10), Builder.getInt32(5)};
@@ -21160,7 +21160,7 @@ Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder,
llvm_unreachable("makeDMB on a target so old that it has no barriers");
}
} else {
- Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
+ Function *DMB = Intrinsic::getOrInsertDeclaration(M, Intrinsic::arm_dmb);
// Only a full system barrier exists in the M-class architectures.
Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
Constant *CDomain = Builder.getInt32(Domain);
@@ -21417,7 +21417,7 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
if (ValueTy->getPrimitiveSizeInBits() == 64) {
Intrinsic::ID Int =
IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
- Function *Ldrex = Intrinsic::getDeclaration(M, Int);
+ Function *Ldrex = Intrinsic::getOrInsertDeclaration(M, Int);
Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
@@ -21433,7 +21433,7 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
Type *Tys[] = { Addr->getType() };
Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
- Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);
+ Function *Ldrex = Intrinsic::getOrInsertDeclaration(M, Int, Tys);
CallInst *CI = Builder.CreateCall(Ldrex, Addr);
CI->addParamAttr(
@@ -21446,7 +21446,8 @@ void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
if (!Subtarget->hasV7Ops())
return;
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
+ Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::arm_clrex));
}
Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
@@ -21461,7 +21462,7 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
if (Val->getType()->getPrimitiveSizeInBits() == 64) {
Intrinsic::ID Int =
IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
- Function *Strex = Intrinsic::getDeclaration(M, Int);
+ Function *Strex = Intrinsic::getOrInsertDeclaration(M, Int);
Type *Int32Ty = Type::getInt32Ty(M->getContext());
Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
@@ -21473,7 +21474,7 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
Type *Tys[] = { Addr->getType() };
- Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
+ Function *Strex = Intrinsic::getOrInsertDeclaration(M, Int, Tys);
CallInst *CI = Builder.CreateCall(
Strex, {Builder.CreateZExtOrBitCast(
@@ -21601,8 +21602,8 @@ bool ARMTargetLowering::lowerInterleavedLoad(
static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
Intrinsic::arm_neon_vld3,
Intrinsic::arm_neon_vld4};
- Function *VldnFunc =
- Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
+ Function *VldnFunc = Intrinsic::getOrInsertDeclaration(
+ LI->getModule(), LoadInts[Factor - 2], Tys);
SmallVector<Value *, 2> Ops;
Ops.push_back(BaseAddr);
@@ -21617,7 +21618,7 @@ bool ARMTargetLowering::lowerInterleavedLoad(
Type *PtrTy = Builder.getPtrTy(LI->getPointerAddressSpace());
Type *Tys[] = {VecTy, PtrTy};
Function *VldnFunc =
- Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);
+ Intrinsic::getOrInsertDeclaration(LI->getModule(), LoadInts, Tys);
SmallVector<Value *, 2> Ops;
Ops.push_back(BaseAddr);
@@ -21762,7 +21763,7 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
Type *Tys[] = {PtrTy, SubVecTy};
- Function *VstNFunc = Intrinsic::getDeclaration(
+ Function *VstNFunc = Intrinsic::getOrInsertDeclaration(
SI->getModule(), StoreInts[Factor - 2], Tys);
SmallVector<Value *, 6> Ops;
@@ -21778,7 +21779,7 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
Type *Tys[] = {PtrTy, SubVecTy};
Function *VstNFunc =
- Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys);
+ Intrinsic::getOrInsertDeclaration(SI->getModule(), StoreInts, Tys);
SmallVector<Value *, 6> Ops;
Ops.push_back(BaseAddr);
diff --git a/llvm/lib/Target/ARM/ARMParallelDSP.cpp b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
index 861d60d3bcce95..7804725ce77319 100644
--- a/llvm/lib/Target/ARM/ARMParallelDSP.cpp
+++ b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -630,13 +630,14 @@ void ARMParallelDSP::InsertParallelMACs(Reduction &R) {
Value* Args[] = { WideLd0, WideLd1, Acc };
Function *SMLAD = nullptr;
if (Exchange)
- SMLAD = Acc->getType()->isIntegerTy(32) ?
- Intrinsic::getDeclaration(M, Intrinsic::arm_smladx) :
- Intrinsic::getDeclaration(M, Intrinsic::arm_smlaldx);
+ SMLAD =
+ Acc->getType()->isIntegerTy(32)
+ ? Intrinsic::getOrInsertDeclaration(M, Intrinsic::arm_smladx)
+ : Intrinsic::getOrInsertDeclaration(M, Intrinsic::arm_smlaldx);
else
- SMLAD = Acc->getType()->isIntegerTy(32) ?
- Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
- Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);
+ SMLAD = Acc->getType()->isIntegerTy(32)
+ ? Intrinsic::getOrInsertDeclaration(M, Intrinsic::arm_smlad)
+ : Intrinsic::getOrInsertDeclaration(M, Intrinsic::arm_smlald);
IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
BasicBlock::iterator(InsertAfter));
diff --git a/llvm/lib/Target/ARM/MVETailPredication.cpp b/llvm/lib/Target/ARM/MVETailPredication.cpp
index e554e4d428d46f..60211db8a61ae3 100644
--- a/llvm/lib/Target/ARM/MVETailPredication.cpp
+++ b/llvm/lib/Target/ARM/MVETailPredication.cpp
@@ -401,7 +401,7 @@ void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
case 8: VCTPID = Intrinsic::arm_mve_vctp16; break;
case 16: VCTPID = Intrinsic::arm_mve_vctp8; break;
}
- Function *VCTP = Intrinsic::getDeclaration(M, VCTPID);
+ Function *VCTP = Intrinsic::getOrInsertDeclaration(M, VCTPID);
Value *VCTPCall = Builder.CreateCall(VCTP, Processed);
ActiveLaneMask->replaceAllUsesWith(VCTPCall);
diff --git a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
index 4be6220b358ba3..7921518166f97d 100644
--- a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
+++ b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
@@ -103,7 +103,7 @@ uint32_t BPFCoreSharedInfo::SeqNum;
Instruction *BPFCoreSharedInfo::insertPassThrough(Module *M, BasicBlock *BB,
Instruction *Input,
Instruction *Before) {
- Function *Fn = Intrinsic::getDeclaration(
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::bpf_passthrough, {Input->getType(), Input->getType()});
Constant *SeqNumVal = ConstantInt::get(Type::getInt32Ty(BB->getContext()),
BPFCoreSharedInfo::SeqNum++);
diff --git a/llvm/lib/Target/BPF/BPFAdjustOpt.cpp b/llvm/lib/Target/BPF/BPFAdjustOpt.cpp
index 4ab0cbcc924779..4ca7bbe9c2a8c4 100644
--- a/llvm/lib/Target/BPF/BPFAdjustOpt.cpp
+++ b/llvm/lib/Target/BPF/BPFAdjustOpt.cpp
@@ -126,7 +126,7 @@ bool BPFAdjustOptImpl::adjustICmpToBuiltin() {
Constant *Opcode =
ConstantInt::get(Type::getInt32Ty(BB.getContext()), Op);
- Function *Fn = Intrinsic::getDeclaration(
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::bpf_compare, {Op0->getType(), ConstOp1->getType()});
auto *NewInst = CallInst::Create(Fn, {Opcode, Op0, ConstOp1});
NewInst->insertBefore(&I);
diff --git a/llvm/lib/Target/BPF/BPFPreserveStaticOffset.cpp b/llvm/lib/Target/BPF/BPFPreserveStaticOffset.cpp
index 5d8339b4a44cec..9f7e3414beb8e3 100644
--- a/llvm/lib/Target/BPF/BPFPreserveStaticOffset.cpp
+++ b/llvm/lib/Target/BPF/BPFPreserveStaticOffset.cpp
@@ -163,7 +163,7 @@ static CallInst *makeIntrinsicCall(Module *M,
ArrayRef<Type *> Types,
ArrayRef<Value *> Args) {
- Function *Fn = Intrinsic::getDeclaration(M, Intrinsic, Types);
+ Function *Fn = Intrinsic::getOrInsertDeclaration(M, Intrinsic, Types);
return CallInst::Create(Fn, Args);
}
diff --git a/llvm/lib/Target/DirectX/DXILOpLowering.cpp b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
index c0f8d433833ee7..99df4850872078 100644
--- a/llvm/lib/Target/DirectX/DXILOpLowering.cpp
+++ b/llvm/lib/Target/DirectX/DXILOpLowering.cpp
@@ -134,8 +134,8 @@ class OpLowerer {
/// piecemeal way - we can add the casts in to avoid updating all of the uses
/// or defs, and by the end all of the casts will be redundant.
Value *createTmpHandleCast(Value *V, Type *Ty) {
- Function *CastFn = Intrinsic::getDeclaration(&M, Intrinsic::dx_cast_handle,
- {Ty, V->getType()});
+ Function *CastFn = Intrinsic::getOrInsertDeclaration(
+ &M, Intrinsic::dx_cast_handle, {Ty, V->getType()});
CallInst *Cast = OpBuilder.getIRB().CreateCall(CastFn, {V});
CleanupCasts.push_back(Cast);
return Cast;
diff --git a/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp b/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp
index 3274f9162b543a..65bbb1364488f7 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp
@@ -212,7 +212,7 @@ bool HexagonGenExtract::convert(Instruction *In) {
Intrinsic::ID IntId = (BW == 32) ? Intrinsic::hexagon_S2_extractu
: Intrinsic::hexagon_S2_extractup;
Module *Mod = BB->getParent()->getParent();
- Function *ExtF = Intrinsic::getDeclaration(Mod, IntId);
+ Function *ExtF = Intrinsic::getOrInsertDeclaration(Mod, IntId);
Value *NewIn = IRB.CreateCall(ExtF, {BF, IRB.getInt32(W), IRB.getInt32(SR)});
if (SL != 0)
NewIn = IRB.CreateShl(NewIn, SL, CSL->getName());
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 856c952e785dac..03c12f5ce44707 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3865,7 +3865,7 @@ Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder,
assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
: Intrinsic::hexagon_L4_loadd_locked;
- Function *Fn = Intrinsic::getDeclaration(M, IntID);
+ Function *Fn = Intrinsic::getOrInsertDeclaration(M, IntID);
Value *Call = Builder.CreateCall(Fn, Addr, "larx");
@@ -3886,7 +3886,7 @@ Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
: Intrinsic::hexagon_S4_stored_locked;
- Function *Fn = Intrinsic::getDeclaration(M, IntID);
+ Function *Fn = Intrinsic::getOrInsertDeclaration(M, IntID);
Val = Builder.CreateBitCast(Val, CastTy);
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index 4ef009c87a1e63..705e1f43851f7a 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -1532,7 +1532,8 @@ Value *PolynomialMultiplyRecognize::generate(BasicBlock::iterator At,
ParsedValues &PV) {
IRBuilder<> B(&*At);
Module *M = At->getParent()->getParent()->getParent();
- Function *PMF = Intrinsic::getDeclaration(M, Intrinsic::hexagon_M4_pmpyw);
+ Function *PMF =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::hexagon_M4_pmpyw);
Value *P = PV.P, *Q = PV.Q, *P0 = P;
unsigned IC = PV.IterCount;
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index f4e495266eae3f..d2cfd3851e711d 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -2390,8 +2390,8 @@ auto HexagonVectorCombine::vralignb(IRBuilderBase &Builder, Value *Lo,
Type *Int64Ty = Type::getInt64Ty(F.getContext());
Value *Lo64 = Builder.CreateBitCast(Lo, Int64Ty, "cst");
Value *Hi64 = Builder.CreateBitCast(Hi, Int64Ty, "cst");
- Function *FI = Intrinsic::getDeclaration(F.getParent(),
- Intrinsic::hexagon_S2_valignrb);
+ Function *FI = Intrinsic::getOrInsertDeclaration(
+ F.getParent(), Intrinsic::hexagon_S2_valignrb);
Value *Call = Builder.CreateCall(FI, {Hi64, Lo64, Amt}, "cup");
return Builder.CreateBitCast(Call, Lo->getType(), "cst");
}
@@ -2587,12 +2587,13 @@ auto HexagonVectorCombine::createHvxIntrinsic(IRBuilderBase &Builder,
unsigned HwLen = HST.getVectorLength();
Intrinsic::ID TC = HwLen == 64 ? Intrinsic::hexagon_V6_pred_typecast
: Intrinsic::hexagon_V6_pred_typecast_128B;
- Function *FI =
- Intrinsic::getDeclaration(F.getParent(), TC, {DestTy, Val->getType()});
+ Function *FI = Intrinsic::getOrInsertDeclaration(F.getParent(), TC,
+ {DestTy, Val->getType()});
return Builder.CreateCall(FI, {Val}, "cup");
};
- Function *IntrFn = Intrinsic::getDeclaration(F.getParent(), IntID, ArgTys);
+ Function *IntrFn =
+ Intrinsic::getOrInsertDeclaration(F.getParent(), IntID, ArgTys);
FunctionType *IntrTy = IntrFn->getFunctionType();
SmallVector<Value *, 4> IntrArgs;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index bfafb331752108..8edca34624e9b2 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -5808,7 +5808,7 @@ Value *LoongArchTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
Type *Tys[] = {AlignedAddr->getType()};
Function *MaskedCmpXchg =
- Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
Value *Result = Builder.CreateCall(
MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, FailureOrdering});
Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
@@ -5838,7 +5838,7 @@ Value *LoongArchTargetLowering::emitMaskedAtomicRMWIntrinsic(
Value *Ordering =
Builder.getIntN(GRLen, static_cast<uint64_t>(AI->getOrdering()));
Type *Tys[] = {AlignedAddr->getType()};
- Function *LlwOpScwLoop = Intrinsic::getDeclaration(
+ Function *LlwOpScwLoop = Intrinsic::getOrInsertDeclaration(
AI->getModule(),
getIntrinsicForMaskedAtomicRMWBinOp(GRLen, AI->getOperation()), Tys);
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
index 082546c4dd72f8..1e30e0113e43c7 100644
--- a/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerArgs.cpp
@@ -282,7 +282,7 @@ static void convertToParamAS(Use *OldUse, Value *Param, bool HasCvtaParam,
[](Value *Addr, Instruction *OriginalUser) -> Value * {
PointerType *ReturnTy =
PointerType::get(OriginalUser->getContext(), ADDRESS_SPACE_GENERIC);
- Function *CvtToGen = Intrinsic::getDeclaration(
+ Function *CvtToGen = Intrinsic::getOrInsertDeclaration(
OriginalUser->getModule(), Intrinsic::nvvm_ptr_param_to_gen,
{ReturnTy, PointerType::get(OriginalUser->getContext(),
ADDRESS_SPACE_PARAM)});
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index 9a8ea8f87896ad..b141229dcfc733 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -360,7 +360,8 @@ static Instruction *simplifyNvvmIntrinsic(IntrinsicInst *II, InstCombiner &IC) {
// type argument, equal to that of the nvvm intrinsic's argument.
Type *Tys[] = {II->getArgOperand(0)->getType()};
return CallInst::Create(
- Intrinsic::getDeclaration(II->getModule(), *Action.IID, Tys), Args);
+ Intrinsic::getOrInsertDeclaration(II->getModule(), *Action.IID, Tys),
+ Args);
}
// Simplify to target-generic binary op.
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index d9847a21489e63..911d92f0c4846b 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -12181,7 +12181,7 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- Function *Func = Intrinsic::getDeclaration(M, Id);
+ Function *Func = Intrinsic::getOrInsertDeclaration(M, Id);
return Builder.CreateCall(Func, {});
}
@@ -12206,7 +12206,7 @@ Instruction *PPCTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
// and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
if (isa<LoadInst>(Inst))
return Builder.CreateCall(
- Intrinsic::getDeclaration(
+ Intrinsic::getOrInsertDeclaration(
Builder.GetInsertBlock()->getParent()->getParent(),
Intrinsic::ppc_cfence, {Inst->getType()}),
{Inst});
@@ -19005,7 +19005,7 @@ Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
Type *ValTy = Incr->getType();
assert(ValTy->getPrimitiveSizeInBits() == 128);
- Function *RMW = Intrinsic::getDeclaration(
+ Function *RMW = Intrinsic::getOrInsertDeclaration(
M, getIntrinsicForAtomicRMWBinOp128(AI->getOperation()));
Type *Int64Ty = Type::getInt64Ty(M->getContext());
Value *IncrLo = Builder.CreateTrunc(Incr, Int64Ty, "incr_lo");
@@ -19028,7 +19028,7 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
Type *ValTy = CmpVal->getType();
assert(ValTy->getPrimitiveSizeInBits() == 128);
Function *IntCmpXchg =
- Intrinsic::getDeclaration(M, Intrinsic::ppc_cmpxchg_i128);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::ppc_cmpxchg_i128);
Type *Int64Ty = Type::getInt64Ty(M->getContext());
Value *CmpLo = Builder.CreateTrunc(CmpVal, Int64Ty, "cmp_lo");
Value *CmpHi =
diff --git a/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp b/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
index d10fe11bb5877b..9c2b58a47392f9 100644
--- a/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLowerMASSVEntries.cpp
@@ -123,7 +123,7 @@ bool PPCLowerMASSVEntries::handlePowSpecialCases(CallInst *CI, Function &Func,
return false;
CI->setCalledFunction(
- Intrinsic::getDeclaration(&M, Intrinsic::pow, CI->getType()));
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::pow, CI->getType()));
return true;
}
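
Call sites that redirect an existing call in place, like the setCalledFunction update just above, follow the same shape. A minimal sketch, assuming the replacement intrinsic is overloaded only on the call's result type and accepts the call's existing operands unchanged; the helper name retargetToIntrinsic is hypothetical.

  // Sketch only, not part of this commit: repoint an existing call at an
  // intrinsic declaration obtained with the renamed API. The caller must
  // ensure the intrinsic's signature matches the original callee's.
  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/Module.h"

  using namespace llvm;

  static void retargetToIntrinsic(CallInst *CI, Intrinsic::ID IID) {
    Function *Decl = Intrinsic::getOrInsertDeclaration(
        CI->getModule(), IID, CI->getType());
    CI->setCalledFunction(Decl);
  }
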
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 230ccd8209e1f2..1f9fc984515cf6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20608,7 +20608,7 @@ Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
Value *Ordering =
Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
Type *Tys[] = {AlignedAddr->getType()};
- Function *LrwOpScwLoop = Intrinsic::getDeclaration(
+ Function *LrwOpScwLoop = Intrinsic::getOrInsertDeclaration(
AI->getModule(),
getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
@@ -20672,7 +20672,7 @@ Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
}
Type *Tys[] = {AlignedAddr->getType()};
Function *MaskedCmpXchg =
- Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
+ Intrinsic::getOrInsertDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
Value *Result = Builder.CreateCall(
MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
if (XLen == 64)
@@ -21170,7 +21170,7 @@ bool RISCVTargetLowering::preferScalarizeSplat(SDNode *N) const {
static Value *useTpOffset(IRBuilderBase &IRB, unsigned Offset) {
Module *M = IRB.GetInsertBlock()->getModule();
Function *ThreadPointerFunc =
- Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::thread_pointer);
return IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
IRB.CreateCall(ThreadPointerFunc), Offset);
}
@@ -21287,9 +21287,9 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
auto *XLenTy = Type::getIntNTy(LI->getContext(), Subtarget.getXLen());
- Function *VlsegNFunc =
- Intrinsic::getDeclaration(LI->getModule(), FixedVlsegIntrIds[Factor - 2],
- {VTy, LI->getPointerOperandType(), XLenTy});
+ Function *VlsegNFunc = Intrinsic::getOrInsertDeclaration(
+ LI->getModule(), FixedVlsegIntrIds[Factor - 2],
+ {VTy, LI->getPointerOperandType(), XLenTy});
Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
@@ -21341,9 +21341,9 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
auto *XLenTy = Type::getIntNTy(SI->getContext(), Subtarget.getXLen());
- Function *VssegNFunc =
- Intrinsic::getDeclaration(SI->getModule(), FixedVssegIntrIds[Factor - 2],
- {VTy, SI->getPointerOperandType(), XLenTy});
+ Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
+ SI->getModule(), FixedVssegIntrIds[Factor - 2],
+ {VTy, SI->getPointerOperandType(), XLenTy});
auto Mask = SVI->getShuffleMask();
SmallVector<Value *, 10> Ops;
@@ -21388,7 +21388,7 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
Type *XLenTy = Type::getIntNTy(LI->getContext(), Subtarget.getXLen());
if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
- Function *VlsegNFunc = Intrinsic::getDeclaration(
+ Function *VlsegNFunc = Intrinsic::getOrInsertDeclaration(
LI->getModule(), FixedVlsegIntrIds[Factor - 2],
{ResVTy, LI->getPointerOperandType(), XLenTy});
Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
@@ -21408,7 +21408,7 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
NumElts * SEW / 8),
Factor);
- Function *VlsegNFunc = Intrinsic::getDeclaration(
+ Function *VlsegNFunc = Intrinsic::getOrInsertDeclaration(
LI->getModule(), IntrIds[Factor - 2], {VecTupTy, XLenTy});
Value *VL = Constant::getAllOnesValue(XLenTy);
@@ -21418,7 +21418,7 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
SmallVector<Type *, 2> AggrTypes{Factor, ResVTy};
Return = PoisonValue::get(StructType::get(LI->getContext(), AggrTypes));
- Function *VecExtractFunc = Intrinsic::getDeclaration(
+ Function *VecExtractFunc = Intrinsic::getOrInsertDeclaration(
LI->getModule(), Intrinsic::riscv_tuple_extract, {ResVTy, VecTupTy});
for (unsigned i = 0; i < Factor; ++i) {
Value *VecExtract =
@@ -21454,7 +21454,7 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
Type *XLenTy = Type::getIntNTy(SI->getContext(), Subtarget.getXLen());
if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
- Function *VssegNFunc = Intrinsic::getDeclaration(
+ Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
SI->getModule(), FixedVssegIntrIds[Factor - 2],
{InVTy, SI->getPointerOperandType(), XLenTy});
Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
@@ -21475,12 +21475,12 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
NumElts * SEW / 8),
Factor);
- Function *VssegNFunc = Intrinsic::getDeclaration(
+ Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
SI->getModule(), IntrIds[Factor - 2], {VecTupTy, XLenTy});
Value *VL = Constant::getAllOnesValue(XLenTy);
- Function *VecInsertFunc = Intrinsic::getDeclaration(
+ Function *VecInsertFunc = Intrinsic::getOrInsertDeclaration(
SI->getModule(), Intrinsic::riscv_tuple_insert, {VecTupTy, InVTy});
Value *StoredVal = PoisonValue::get(VecTupTy);
for (unsigned i = 0; i < Factor; ++i)
diff --git a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
index 1872b238d1077a..ecf9b6ddae1fc3 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPrepareFunctions.cpp
@@ -353,11 +353,11 @@ static void lowerExpectAssume(IntrinsicInst *II) {
// We need to lower this into a builtin and then the builtin into a SPIR-V
// instruction.
if (II->getIntrinsicID() == Intrinsic::assume) {
- Function *F = Intrinsic::getDeclaration(
+ Function *F = Intrinsic::getOrInsertDeclaration(
II->getModule(), Intrinsic::SPVIntrinsics::spv_assume);
II->setCalledFunction(F);
} else if (II->getIntrinsicID() == Intrinsic::expect) {
- Function *F = Intrinsic::getDeclaration(
+ Function *F = Intrinsic::getOrInsertDeclaration(
II->getModule(), Intrinsic::SPVIntrinsics::spv_expect,
{II->getOperand(0)->getType()});
II->setCalledFunction(F);
@@ -372,12 +372,12 @@ static bool toSpvOverloadedIntrinsic(IntrinsicInst *II, Intrinsic::ID NewID,
ArrayRef<unsigned> OpNos) {
Function *F = nullptr;
if (OpNos.empty()) {
- F = Intrinsic::getDeclaration(II->getModule(), NewID);
+ F = Intrinsic::getOrInsertDeclaration(II->getModule(), NewID);
} else {
SmallVector<Type *, 4> Tys;
for (unsigned OpNo : OpNos)
Tys.push_back(II->getOperand(OpNo)->getType());
- F = Intrinsic::getDeclaration(II->getModule(), NewID, Tys);
+ F = Intrinsic::getOrInsertDeclaration(II->getModule(), NewID, Tys);
}
II->setCalledFunction(F);
return true;
diff --git a/llvm/lib/Target/SystemZ/SystemZTDC.cpp b/llvm/lib/Target/SystemZ/SystemZTDC.cpp
index f62afb8ddfcfae..345327e880ecd5 100644
--- a/llvm/lib/Target/SystemZ/SystemZTDC.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTDC.cpp
@@ -366,8 +366,8 @@ bool SystemZTDCPass::runOnFunction(Function &F) {
if (!Worthy)
continue;
// Call the intrinsic, compare result with 0.
- Function *TDCFunc =
- Intrinsic::getDeclaration(&M, Intrinsic::s390_tdc, V->getType());
+ Function *TDCFunc = Intrinsic::getOrInsertDeclaration(
+ &M, Intrinsic::s390_tdc, V->getType());
IRBuilder<> IRB(I);
Value *MaskVal = ConstantInt::get(Type::getInt64Ty(Ctx), Mask);
Instruction *TDC = IRB.CreateCall(TDCFunc, {V, MaskVal});
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index c040e560be605f..b999f83507f4ce 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -1016,7 +1016,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
// wasm.catch() will be lowered down to wasm 'catch' instruction in
// instruction selection.
- CatchF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_catch);
+ CatchF = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::wasm_catch);
// Type for struct __WasmLongjmpArgs
LongjmpArgsTy = StructType::get(Int8PtrTy, // env
Int32Ty // val
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp
index 2594430d1d8f3a..c61aa5eff4a708 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerRefTypesIntPtrConv.cpp
@@ -72,7 +72,7 @@ bool WebAssemblyLowerRefTypesIntPtrConv::runOnFunction(Function &F) {
I->replaceAllUsesWith(U);
Function *TrapIntrin =
- Intrinsic::getDeclaration(F.getParent(), Intrinsic::debugtrap);
+ Intrinsic::getOrInsertDeclaration(F.getParent(), Intrinsic::debugtrap);
CallInst::Create(TrapIntrin, {}, "", I->getIterator());
worklist.insert(&*I);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7a6d20c6a121b6..de88db22279797 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -31163,12 +31163,14 @@ void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
if (BitTested.second == ConstantBit || BitTested.second == NotConstantBit) {
auto *C = cast<ConstantInt>(I->getOperand(I->getOperand(0) == AI ? 1 : 0));
- BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_C, AI->getType());
+ BitTest = Intrinsic::getOrInsertDeclaration(AI->getModule(), IID_C,
+ AI->getType());
unsigned Imm = llvm::countr_zero(C->getZExtValue());
Result = Builder.CreateCall(BitTest, {Addr, Builder.getInt8(Imm)});
} else {
- BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_I, AI->getType());
+ BitTest = Intrinsic::getOrInsertDeclaration(AI->getModule(), IID_I,
+ AI->getType());
assert(BitTested.second == ShiftBit || BitTested.second == NotShiftBit);
@@ -31328,7 +31330,7 @@ void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
break;
}
Function *CmpArith =
- Intrinsic::getDeclaration(AI->getModule(), IID, AI->getType());
+ Intrinsic::getOrInsertDeclaration(AI->getModule(), IID, AI->getType());
Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
PointerType::getUnqual(Ctx));
Value *Call = Builder.CreateCall(
@@ -31444,7 +31446,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
return nullptr;
Function *MFence =
- llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
+ llvm::Intrinsic::getOrInsertDeclaration(M, Intrinsic::x86_sse2_mfence);
Builder.CreateCall(MFence, {});
// Finally we can emit the atomic load.
diff --git a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
index 77139f38c977bb..c4374984da4b9e 100644
--- a/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp
@@ -1876,7 +1876,8 @@ static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
Value *Args[] = {Op0, CILength, CIIndex};
Module *M = II.getModule();
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::x86_sse4a_extrqi);
return Builder.CreateCall(F, Args);
}
}
@@ -1975,7 +1976,8 @@ static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
Value *Args[] = {Op0, Op1, CILength, CIIndex};
Module *M = II.getModule();
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::x86_sse4a_insertqi);
return Builder.CreateCall(F, Args);
}
diff --git a/llvm/lib/Target/X86/X86PartialReduction.cpp b/llvm/lib/Target/X86/X86PartialReduction.cpp
index 5bbfabcbd67bc6..e88702caa9a52b 100644
--- a/llvm/lib/Target/X86/X86PartialReduction.cpp
+++ b/llvm/lib/Target/X86/X86PartialReduction.cpp
@@ -278,7 +278,7 @@ bool X86PartialReduction::trySADReplacement(Instruction *Op) {
IntrinsicNumElts = 16;
}
- Function *PSADBWFn = Intrinsic::getDeclaration(Op->getModule(), IID);
+ Function *PSADBWFn = Intrinsic::getOrInsertDeclaration(Op->getModule(), IID);
if (NumElts < 16) {
// Pad input with zeroes.
diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp
index 963d613ddbfe7d..05fc6f13129f24 100644
--- a/llvm/lib/Target/X86/X86WinEHState.cpp
+++ b/llvm/lib/Target/X86/X86WinEHState.cpp
@@ -334,7 +334,7 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
if (UseStackGuard) {
Value *Val = Builder.CreateLoad(Int32Ty, Cookie);
Value *FrameAddr = Builder.CreateCall(
- Intrinsic::getDeclaration(
+ Intrinsic::getOrInsertDeclaration(
TheModule, Intrinsic::frameaddress,
Builder.getPtrTy(
TheModule->getDataLayout().getAllocaAddrSpace())),
@@ -370,7 +370,7 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
Value *WinEHStatePass::emitEHLSDA(IRBuilder<> &Builder, Function *F) {
return Builder.CreateCall(
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_seh_lsda), F);
+ Intrinsic::getOrInsertDeclaration(TheModule, Intrinsic::x86_seh_lsda), F);
}
/// Generate a thunk that puts the LSDA of ParentFunc in EAX and then calls
@@ -624,17 +624,17 @@ void WinEHStatePass::addStateStores(Function &F, WinEHFuncInfo &FuncInfo) {
// that it can recover the original frame pointer.
IRBuilder<> Builder(RegNode->getNextNode());
Value *RegNodeI8 = Builder.CreateBitCast(RegNode, Builder.getPtrTy());
- Builder.CreateCall(
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_seh_ehregnode),
- {RegNodeI8});
+ Builder.CreateCall(Intrinsic::getOrInsertDeclaration(
+ TheModule, Intrinsic::x86_seh_ehregnode),
+ {RegNodeI8});
if (EHGuardNode) {
IRBuilder<> Builder(EHGuardNode->getNextNode());
Value *EHGuardNodeI8 =
Builder.CreateBitCast(EHGuardNode, Builder.getPtrTy());
- Builder.CreateCall(
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_seh_ehguard),
- {EHGuardNodeI8});
+ Builder.CreateCall(Intrinsic::getOrInsertDeclaration(
+ TheModule, Intrinsic::x86_seh_ehguard),
+ {EHGuardNodeI8});
}
// Calculate state numbers.
diff --git a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index 95962d1a0a240f..3604774ddf35bf 100644
--- a/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/llvm/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -157,8 +157,8 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) {
for (User *U : Users) {
Instruction *Inst = cast<Instruction>(U);
IRBuilder<> Builder(Inst);
- Function *GetID = Intrinsic::getDeclaration(GV->getParent(),
- Intrinsic::xcore_getid);
+ Function *GetID = Intrinsic::getOrInsertDeclaration(GV->getParent(),
+ Intrinsic::xcore_getid);
Value *ThreadID = Builder.CreateCall(GetID, {});
Value *Addr = Builder.CreateInBoundsGEP(NewGV->getValueType(), NewGV,
{Builder.getInt64(0), ThreadID});
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index 9943c3cbb9fc7d..898d55fab2b00d 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -172,7 +172,8 @@ static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
// %cond = phi i32 [ %fsh, %FunnelBB ], [ %ShVal0, %GuardBB ]
// -->
// llvm.fshl.i32(i32 %ShVal0, i32 %ShVal1, i32 %ShAmt)
- Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(Phi.getModule(), IID, Phi.getType());
Phi.replaceAllUsesWith(Builder.CreateCall(F, {ShVal0, ShVal1, ShAmt}));
return true;
}
@@ -331,7 +332,7 @@ static bool tryToRecognizePopCount(Instruction &I) {
m_SpecificInt(Mask55)))) {
LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
IRBuilder<> Builder(&I);
- Function *Func = Intrinsic::getDeclaration(
+ Function *Func = Intrinsic::getOrInsertDeclaration(
I.getModule(), Intrinsic::ctpop, I.getType());
I.replaceAllUsesWith(Builder.CreateCall(Func, {Root}));
++NumPopCountRecognized;
@@ -398,8 +399,8 @@ static bool tryToFPToSat(Instruction &I, TargetTransformInfo &TTI) {
return false;
IRBuilder<> Builder(&I);
- Function *Fn = Intrinsic::getDeclaration(I.getModule(), Intrinsic::fptosi_sat,
- {SatTy, FpTy});
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
+ I.getModule(), Intrinsic::fptosi_sat, {SatTy, FpTy});
Value *Sat = Builder.CreateCall(Fn, In);
I.replaceAllUsesWith(Builder.CreateSExt(Sat, IntTy));
return true;
@@ -431,7 +432,7 @@ static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI,
IRBuilderBase::FastMathFlagGuard Guard(Builder);
Builder.setFastMathFlags(Call->getFastMathFlags());
- Function *Sqrt = Intrinsic::getDeclaration(M, Intrinsic::sqrt, Ty);
+ Function *Sqrt = Intrinsic::getOrInsertDeclaration(M, Intrinsic::sqrt, Ty);
Value *NewSqrt = Builder.CreateCall(Sqrt, Arg, "sqrt");
Call->replaceAllUsesWith(NewSqrt);
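
Intrinsics overloaded on more than one type, such as the fptosi_sat case above, list every overload type, result type first. A short sketch under the same assumptions; createFpToSatCall is a hypothetical helper.

  // Sketch only, not part of this commit: build a llvm.fptosi.sat call whose
  // declaration is overloaded on both the saturated result type and the
  // floating-point source type.
  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/Module.h"

  using namespace llvm;

  static CallInst *createFpToSatCall(Module *M, Value *FpVal, Type *SatTy,
                                     Instruction *InsertBefore) {
    Function *Fn = Intrinsic::getOrInsertDeclaration(
        M, Intrinsic::fptosi_sat, {SatTy, FpVal->getType()});
    return CallInst::Create(Fn, {FpVal}, "sat", InsertBefore->getIterator());
  }
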
diff --git a/llvm/lib/Transforms/Coroutines/Coroutines.cpp b/llvm/lib/Transforms/Coroutines/Coroutines.cpp
index 1c45bcd7f6a837..45b9767657c66a 100644
--- a/llvm/lib/Transforms/Coroutines/Coroutines.cpp
+++ b/llvm/lib/Transforms/Coroutines/Coroutines.cpp
@@ -52,7 +52,8 @@ coro::LowererBase::LowererBase(Module &M)
CallInst *coro::LowererBase::makeSubFnCall(Value *Arg, int Index,
Instruction *InsertPt) {
auto *IndexVal = ConstantInt::get(Type::getInt8Ty(Context), Index);
- auto *Fn = Intrinsic::getDeclaration(&TheModule, Intrinsic::coro_subfn_addr);
+ auto *Fn =
+ Intrinsic::getOrInsertDeclaration(&TheModule, Intrinsic::coro_subfn_addr);
assert(Index >= CoroSubFnInst::IndexFirst &&
Index < CoroSubFnInst::IndexLast &&
@@ -183,7 +184,7 @@ void coro::suppressCoroAllocs(LLVMContext &Context,
static CoroSaveInst *createCoroSave(CoroBeginInst *CoroBegin,
CoroSuspendInst *SuspendInst) {
Module *M = SuspendInst->getModule();
- auto *Fn = Intrinsic::getDeclaration(M, Intrinsic::coro_save);
+ auto *Fn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::coro_save);
auto *SaveInst = cast<CoroSaveInst>(
CallInst::Create(Fn, CoroBegin, "", SuspendInst->getIterator()));
assert(!SuspendInst->getCoroSave());
diff --git a/llvm/lib/Transforms/IPO/CrossDSOCFI.cpp b/llvm/lib/Transforms/IPO/CrossDSOCFI.cpp
index 91d445dfc4c734..9e5d9ea31af6c4 100644
--- a/llvm/lib/Transforms/IPO/CrossDSOCFI.cpp
+++ b/llvm/lib/Transforms/IPO/CrossDSOCFI.cpp
@@ -125,7 +125,8 @@ void CrossDSOCFI::buildCFICheck(Module &M) {
ConstantInt *CaseTypeId = ConstantInt::get(Type::getInt64Ty(Ctx), TypeId);
BasicBlock *TestBB = BasicBlock::Create(Ctx, "test", F);
IRBuilder<> IRBTest(TestBB);
- Function *BitsetTestFn = Intrinsic::getDeclaration(&M, Intrinsic::type_test);
+ Function *BitsetTestFn =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::type_test);
Value *Test = IRBTest.CreateCall(
BitsetTestFn, {&Addr, MetadataAsValue::get(
diff --git a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp
index d84856f71c9de6..543987d5981bab 100644
--- a/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp
+++ b/llvm/lib/Transforms/IPO/SampleProfileProbe.cpp
@@ -401,7 +401,7 @@ void SampleProfileProber::instrumentOneFunc(Function &F, TargetMachine *TM) {
assert(Builder.GetInsertPoint() != BB->end() &&
"Cannot get the probing point");
Function *ProbeFn =
- llvm::Intrinsic::getDeclaration(M, Intrinsic::pseudoprobe);
+ llvm::Intrinsic::getOrInsertDeclaration(M, Intrinsic::pseudoprobe);
Value *Args[] = {Builder.getInt64(Guid), Builder.getInt64(Index),
Builder.getInt32(0),
Builder.getInt64(PseudoProbeFullDistributionFactor)};
diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index 36a1841b363463..59f986b4ca2664 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -856,7 +856,7 @@ void llvm::updatePublicTypeTestCalls(Module &M,
return;
if (hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO)) {
Function *TypeTestFunc =
- Intrinsic::getDeclaration(&M, Intrinsic::type_test);
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::type_test);
for (Use &U : make_early_inc_range(PublicTypeTestFunc->uses())) {
auto *CI = cast<CallInst>(U.getUser());
auto *NewCI = CallInst::Create(
@@ -1187,7 +1187,8 @@ void DevirtModule::applySingleImplDevirt(VTableSlotInfo &SlotInfo,
Instruction *ThenTerm =
SplitBlockAndInsertIfThen(Cond, &CB, /*Unreachable=*/false);
Builder.SetInsertPoint(ThenTerm);
- Function *TrapFn = Intrinsic::getDeclaration(&M, Intrinsic::debugtrap);
+ Function *TrapFn =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::debugtrap);
auto *CallTrap = Builder.CreateCall(TrapFn);
CallTrap->setDebugLoc(CB.getDebugLoc());
}
@@ -1434,8 +1435,8 @@ void DevirtModule::tryICallBranchFunnel(
}
BasicBlock *BB = BasicBlock::Create(M.getContext(), "", JT, nullptr);
- Function *Intr =
- Intrinsic::getDeclaration(&M, llvm::Intrinsic::icall_branch_funnel, {});
+ Function *Intr = Intrinsic::getOrInsertDeclaration(
+ &M, llvm::Intrinsic::icall_branch_funnel, {});
auto *CI = CallInst::Create(Intr, JTArgs, "", BB);
CI->setTailCallKind(CallInst::TCK_MustTail);
@@ -2026,7 +2027,8 @@ void DevirtModule::scanTypeTestUsers(
}
void DevirtModule::scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc) {
- Function *TypeTestFunc = Intrinsic::getDeclaration(&M, Intrinsic::type_test);
+ Function *TypeTestFunc =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::type_test);
for (Use &U : llvm::make_early_inc_range(TypeCheckedLoadFunc->uses())) {
auto *CI = dyn_cast<CallInst>(U.getUser());
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index e5c3a20e1a6487..21588aca512758 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1232,7 +1232,8 @@ static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
Type *Ty = I.getType();
auto getUAddSat = [&]() {
- return Intrinsic::getDeclaration(I.getModule(), Intrinsic::uadd_sat, Ty);
+ return Intrinsic::getOrInsertDeclaration(I.getModule(), Intrinsic::uadd_sat,
+ Ty);
};
// add (umin X, ~Y), Y --> uaddsat X, Y
@@ -2127,7 +2128,7 @@ static Instruction *foldSubOfMinMax(BinaryOperator &I,
if (match(Op0, m_c_Add(m_Specific(X), m_Specific(Y))) &&
(Op0->hasOneUse() || Op1->hasOneUse())) {
Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
- Function *F = Intrinsic::getDeclaration(I.getModule(), InvID, Ty);
+ Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty);
return CallInst::Create(F, {X, Y});
}
@@ -2150,7 +2151,7 @@ static Instruction *foldSubOfMinMax(BinaryOperator &I,
if (MinMax->isSigned() && match(Y, m_ZeroInt()) &&
match(X, m_NSWSub(m_Specific(Op0), m_Value(Z)))) {
Intrinsic::ID InvID = getInverseMinMaxIntrinsic(MinMax->getIntrinsicID());
- Function *F = Intrinsic::getDeclaration(I.getModule(), InvID, Ty);
+ Function *F = Intrinsic::getOrInsertDeclaration(I.getModule(), InvID, Ty);
return CallInst::Create(F, {Op0, Z});
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 964616a4eb35e2..453071f3f982cd 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2269,7 +2269,8 @@ foldBitwiseLogicWithIntrinsics(BinaryOperator &I,
Builder.CreateBinOp(I.getOpcode(), X->getOperand(0), Y->getOperand(0));
Value *NewOp1 =
Builder.CreateBinOp(I.getOpcode(), X->getOperand(1), Y->getOperand(1));
- Function *F = Intrinsic::getDeclaration(I.getModule(), IID, I.getType());
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(I.getModule(), IID, I.getType());
return CallInst::Create(F, {NewOp0, NewOp1, X->getOperand(2)});
}
case Intrinsic::bswap:
@@ -2280,7 +2281,8 @@ foldBitwiseLogicWithIntrinsics(BinaryOperator &I,
: ConstantInt::get(I.getType(), IID == Intrinsic::bswap
? RHSC->byteSwap()
: RHSC->reverseBits()));
- Function *F = Intrinsic::getDeclaration(I.getModule(), IID, I.getType());
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(I.getModule(), IID, I.getType());
return CallInst::Create(F, {NewOp0});
}
default:
@@ -3056,7 +3058,8 @@ InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {
static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
if (auto Opt = IC.convertOrOfShiftsToFunnelShift(Or)) {
auto [IID, FShiftArgs] = *Opt;
- Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType());
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(Or.getModule(), IID, Or.getType());
return CallInst::Create(F, FShiftArgs);
}
@@ -3095,7 +3098,7 @@ static Instruction *matchOrConcat(Instruction &Or,
Value *NewUpper = Builder.CreateZExt(Hi, Ty);
NewUpper = Builder.CreateShl(NewUpper, HalfWidth);
Value *BinOp = Builder.CreateOr(NewLower, NewUpper);
- Function *F = Intrinsic::getDeclaration(Or.getModule(), id, Ty);
+ Function *F = Intrinsic::getOrInsertDeclaration(Or.getModule(), id, Ty);
return Builder.CreateCall(F, BinOp);
};
@@ -4803,7 +4806,8 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
match(II->getArgOperand(1), m_One()) &&
isKnownToBeAPowerOfTwo(II->getArgOperand(0), /*OrZero */ true)) {
IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
- Function *F = Intrinsic::getDeclaration(II->getModule(), IID, Ty);
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(II->getModule(), IID, Ty);
return CallInst::Create(F, {II->getArgOperand(0), Builder.getTrue()});
}
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index f7a9406791801c..51e09b7e7c1437 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -488,7 +488,8 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
// cttz(bitreverse(x)) -> ctlz(x)
if (match(Op0, m_BitReverse(m_Value(X)))) {
Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
- Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(II.getModule(), ID, II.getType());
return CallInst::Create(F, {X, II.getArgOperand(1)});
}
@@ -647,7 +648,7 @@ static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
if (Op0->hasOneUse() &&
match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
Function *F =
- Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
+ Intrinsic::getOrInsertDeclaration(II.getModule(), Intrinsic::cttz, Ty);
auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
@@ -657,7 +658,7 @@ static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
if (match(Op0,
m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
Function *F =
- Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
+ Intrinsic::getOrInsertDeclaration(II.getModule(), Intrinsic::cttz, Ty);
return CallInst::Create(F, {X, IC.Builder.getFalse()});
}
@@ -1181,7 +1182,8 @@ Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
return nullptr;
// Finally create and return the sat intrinsic, truncated to the new type
- Function *F = Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
+ Function *F = Intrinsic::getOrInsertDeclaration(MinMax1.getModule(),
+ IntrinsicID, NewTy);
Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy);
Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy);
Value *Sat = Builder.CreateCall(F, {AT, BT});
@@ -1286,8 +1288,8 @@ reassociateMinMaxWithConstantInOperand(IntrinsicInst *II,
return nullptr;
// max (max X, C), Y --> max (max X, Y), C
- Function *MinMax =
- Intrinsic::getDeclaration(II->getModule(), MinMaxID, II->getType());
+ Function *MinMax = Intrinsic::getOrInsertDeclaration(II->getModule(),
+ MinMaxID, II->getType());
Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
NewInner->takeName(Inner);
return CallInst::Create(MinMax, {NewInner, C});
@@ -1346,7 +1348,8 @@ static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
return nullptr;
Module *Mod = II->getModule();
- Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
+ Function *MinMax =
+ Intrinsic::getOrInsertDeclaration(Mod, MinMaxID, II->getType());
return CallInst::Create(MinMax, { MinMaxOp, ThirdOp });
}
@@ -1571,7 +1574,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
Type *Tys[3] = { CI.getArgOperand(0)->getType(),
CI.getArgOperand(1)->getType(),
CI.getArgOperand(2)->getType() };
- CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
+ CI.setCalledFunction(
+ Intrinsic::getOrInsertDeclaration(M, MemCpyID, Tys));
Changed = true;
}
}
@@ -2095,7 +2099,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
Module *Mod = II->getModule();
- Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
+ Function *Fshl =
+ Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::fshl, Ty);
return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
}
assert(IID == Intrinsic::fshl &&
@@ -2115,7 +2120,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
Module *Mod = II->getModule();
- Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
+ Function *Bswap =
+ Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::bswap, Ty);
return CallInst::Create(Bswap, { Op0 });
}
if (Instruction *BitOp =
@@ -2824,7 +2830,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
CallArgs.push_back(II->getArgOperand(4));
}
- Function *NewFn = Intrinsic::getDeclaration(II->getModule(), NewIntrin);
+ Function *NewFn =
+ Intrinsic::getOrInsertDeclaration(II->getModule(), NewIntrin);
return CallInst::Create(NewFn, CallArgs);
}
case Intrinsic::arm_neon_vtbl1:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 9934c065ebf85f..6c2554ea73b7f8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -95,8 +95,8 @@ Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
default:
llvm_unreachable("Unsupported call!");
case Intrinsic::vscale: {
- Function *Fn =
- Intrinsic::getDeclaration(I->getModule(), Intrinsic::vscale, {Ty});
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
+ I->getModule(), Intrinsic::vscale, {Ty});
Res = CallInst::Create(Fn->getFunctionType(), Fn);
break;
}
@@ -600,7 +600,8 @@ Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) {
if (ShVal0 != ShVal1)
Y = Builder.CreateTrunc(ShVal1, DestTy);
Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
- Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(Trunc.getModule(), IID, DestTy);
return CallInst::Create(F, {X, Y, NarrowShAmt});
}
@@ -1912,8 +1913,8 @@ Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
// Do unary FP operation on smaller type.
// (fptrunc (fabs x)) -> (fabs (fptrunc x))
Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
- Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
- II->getIntrinsicID(), Ty);
+ Function *Overload = Intrinsic::getOrInsertDeclaration(
+ FPT.getModule(), II->getIntrinsicID(), Ty);
SmallVector<OperandBundleDef, 1> OpBundles;
II->getOperandBundlesAsDefs(OpBundles);
CallInst *NewCI =
@@ -2855,8 +2856,8 @@ Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
if (IntrinsicNum != 0) {
assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
assert(match(ShufOp1, m_Undef()) && "Unexpected shuffle op");
- Function *BswapOrBitreverse =
- Intrinsic::getDeclaration(CI.getModule(), IntrinsicNum, DestTy);
+ Function *BswapOrBitreverse = Intrinsic::getOrInsertDeclaration(
+ CI.getModule(), IntrinsicNum, DestTy);
Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
return CallInst::Create(BswapOrBitreverse, {ScalarX});
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index d1eb84b5ca5c10..7129499e0f8f9d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1125,7 +1125,7 @@ static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
// use the sadd_with_overflow intrinsic to efficiently compute both the
// result and the overflow bit.
Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
- Function *F = Intrinsic::getDeclaration(
+ Function *F = Intrinsic::getOrInsertDeclaration(
I.getModule(), Intrinsic::sadd_with_overflow, NewType);
InstCombiner::BuilderTy &Builder = IC.Builder;
@@ -4790,11 +4790,11 @@ Value *InstCombinerImpl::foldMultiplicationOverflowCheck(ICmpInst &I) {
if (MulHadOtherUses)
Builder.SetInsertPoint(Mul);
- Function *F = Intrinsic::getDeclaration(I.getModule(),
- Div->getOpcode() == Instruction::UDiv
- ? Intrinsic::umul_with_overflow
- : Intrinsic::smul_with_overflow,
- X->getType());
+ Function *F = Intrinsic::getOrInsertDeclaration(
+ I.getModule(),
+ Div->getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
+ : Intrinsic::smul_with_overflow,
+ X->getType());
CallInst *Call = Builder.CreateCall(F, {X, Y}, "mul");
// If the multiplication was used elsewhere, to ensure that we don't leave
@@ -6334,7 +6334,7 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
MulA = Builder.CreateZExt(A, MulType);
if (WidthB < MulWidth)
MulB = Builder.CreateZExt(B, MulType);
- Function *F = Intrinsic::getDeclaration(
+ Function *F = Intrinsic::getOrInsertDeclaration(
I.getModule(), Intrinsic::umul_with_overflow, MulType);
CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
IC.addToWorklist(MulInstr);
@@ -7121,8 +7121,8 @@ static Instruction *foldVectorCmp(CmpInst &Cmp,
if (auto *I = dyn_cast<Instruction>(V))
I->copyIRFlags(&Cmp);
Module *M = Cmp.getModule();
- Function *F =
- Intrinsic::getDeclaration(M, Intrinsic::vector_reverse, V->getType());
+ Function *F = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::vector_reverse, V->getType());
return CallInst::Create(F, V);
};
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 3f780285efe423..358563a5fcd537 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -1148,8 +1148,8 @@ static Instruction *foldSelectCtlzToCttz(ICmpInst *ICI, Value *TrueVal,
if (!match(II->getOperand(0), m_c_And(m_Specific(X), m_Neg(m_Specific(X)))))
return nullptr;
- Function *F = Intrinsic::getDeclaration(II->getModule(), Intrinsic::cttz,
- II->getType());
+ Function *F = Intrinsic::getOrInsertDeclaration(
+ II->getModule(), Intrinsic::cttz, II->getType());
return CallInst::Create(F, {X, II->getArgOperand(1)});
}
@@ -2242,8 +2242,8 @@ foldOverflowingAddSubSelect(SelectInst &SI, InstCombiner::BuilderTy &Builder) {
else
return nullptr;
- Function *F =
- Intrinsic::getDeclaration(SI.getModule(), NewIntrinsicID, SI.getType());
+ Function *F = Intrinsic::getOrInsertDeclaration(SI.getModule(),
+ NewIntrinsicID, SI.getType());
return CallInst::Create(F, {X, Y});
}
@@ -2537,7 +2537,8 @@ static Instruction *foldSelectFunnelShift(SelectInst &Sel,
// This is a funnel/rotate that avoids shift-by-bitwidth UB in a suboptimal way.
// Convert to funnel shift intrinsic.
Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
- Function *F = Intrinsic::getDeclaration(Sel.getModule(), IID, Sel.getType());
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(Sel.getModule(), IID, Sel.getType());
ShAmt = Builder.CreateZExt(ShAmt, Sel.getType());
return CallInst::Create(F, { SV0, SV1, ShAmt });
}
@@ -2580,8 +2581,8 @@ static Instruction *foldSelectToCopysign(SelectInst &Sel,
// Canonicalize the magnitude argument as the positive constant since we do
// not care about its sign.
Value *MagArg = ConstantFP::get(SelType, abs(*TC));
- Function *F = Intrinsic::getDeclaration(Sel.getModule(), Intrinsic::copysign,
- Sel.getType());
+ Function *F = Intrinsic::getOrInsertDeclaration(
+ Sel.getModule(), Intrinsic::copysign, Sel.getType());
return CallInst::Create(F, { MagArg, X });
}
@@ -2600,8 +2601,8 @@ Instruction *InstCombinerImpl::foldVectorSelect(SelectInst &Sel) {
if (auto *I = dyn_cast<Instruction>(V))
I->copyIRFlags(&Sel);
Module *M = Sel.getModule();
- Function *F =
- Intrinsic::getDeclaration(M, Intrinsic::vector_reverse, V->getType());
+ Function *F = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::vector_reverse, V->getType());
return CallInst::Create(F, V);
};
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 3d4461dc1a87f6..8ca705ae1d364d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -898,7 +898,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Instruction *I,
Value *X;
if (DemandedMask == 1 && VTy->getScalarSizeInBits() % 2 == 0 &&
match(II->getArgOperand(0), m_Not(m_Value(X)))) {
- Function *Ctpop = Intrinsic::getDeclaration(
+ Function *Ctpop = Intrinsic::getOrInsertDeclaration(
II->getModule(), Intrinsic::ctpop, VTy);
return InsertNewInstWith(CallInst::Create(Ctpop, {X}), I->getIterator());
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index d9b4faff4c004d..d68ae64f08aa90 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -2474,8 +2474,8 @@ static Instruction *foldShuffleOfUnaryOps(ShuffleVectorInst &Shuf,
if (IsFNeg)
return UnaryOperator::CreateFNegFMF(NewShuf, S0);
- Function *FAbs = Intrinsic::getDeclaration(Shuf.getModule(),
- Intrinsic::fabs, Shuf.getType());
+ Function *FAbs = Intrinsic::getOrInsertDeclaration(
+ Shuf.getModule(), Intrinsic::fabs, Shuf.getType());
CallInst *NewF = CallInst::Create(FAbs, {NewShuf});
NewF->setFastMathFlags(S0->getFastMathFlags());
return NewF;
@@ -2495,8 +2495,8 @@ static Instruction *foldShuffleOfUnaryOps(ShuffleVectorInst &Shuf,
if (IsFNeg) {
NewF = UnaryOperator::CreateFNeg(NewShuf);
} else {
- Function *FAbs = Intrinsic::getDeclaration(Shuf.getModule(),
- Intrinsic::fabs, Shuf.getType());
+ Function *FAbs = Intrinsic::getOrInsertDeclaration(
+ Shuf.getModule(), Intrinsic::fabs, Shuf.getType());
NewF = CallInst::Create(FAbs, {NewShuf});
}
NewF->copyIRFlags(S0);
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 1f4a6f793404cf..954c4cf19c2077 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2084,8 +2084,8 @@ Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
if (auto *BO = dyn_cast<BinaryOperator>(V))
BO->copyIRFlags(&Inst);
Module *M = Inst.getModule();
- Function *F =
- Intrinsic::getDeclaration(M, Intrinsic::vector_reverse, V->getType());
+ Function *F = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::vector_reverse, V->getType());
return CallInst::Create(F, V);
};
@@ -3355,7 +3355,7 @@ Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
// Replace invoke with a NOP intrinsic to maintain the original CFG
Module *M = II->getModule();
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
+ Function *F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::donothing);
InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(), {}, "",
II->getParent());
}
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 2ad89b5ba753a5..02d9fab309d83b 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1109,7 +1109,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
// alloca. We have a special @llvm.get.dynamic.area.offset intrinsic for
// this purpose.
if (!isa<ReturnInst>(InstBefore)) {
- Function *DynamicAreaOffsetFunc = Intrinsic::getDeclaration(
+ Function *DynamicAreaOffsetFunc = Intrinsic::getOrInsertDeclaration(
InstBefore->getModule(), Intrinsic::get_dynamic_area_offset,
{IntptrTy});
@@ -1867,7 +1867,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
const ASanAccessInfo AccessInfo(IsWrite, CompileKernel, AccessSizeIndex);
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
IRB.CreateCall(
- Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::asan_check_memaccess),
{IRB.CreatePointerCast(Addr, PtrTy),
ConstantInt::get(Int32Ty, AccessInfo.Packed)});
return;
diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 618b6fe1aea474..63d580d2b9d512 100644
--- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -194,7 +194,7 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
IRB.SetInsertPoint(TrapBB);
Intrinsic::ID IntrID = DebugTrapBB ? Intrinsic::ubsantrap : Intrinsic::trap;
- auto *F = Intrinsic::getDeclaration(Fn->getParent(), IntrID);
+ auto *F = Intrinsic::getOrInsertDeclaration(Fn->getParent(), IntrID);
CallInst *TrapCall;
if (DebugTrapBB) {
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index cc7f20cffea771..5ec4973ea03d8f 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1042,14 +1042,14 @@ void HWAddressSanitizer::instrumentMemAccessOutline(Value *Ptr, bool IsWrite,
if (UseFixedShadowIntrinsic) {
IRB.CreateCall(
- Intrinsic::getDeclaration(
+ Intrinsic::getOrInsertDeclaration(
M, UseShortGranules
? Intrinsic::hwasan_check_memaccess_shortgranules_fixedshadow
: Intrinsic::hwasan_check_memaccess_fixedshadow),
{Ptr, ConstantInt::get(Int32Ty, AccessInfo),
ConstantInt::get(Int64Ty, Mapping.offset())});
} else {
- IRB.CreateCall(Intrinsic::getDeclaration(
+ IRB.CreateCall(Intrinsic::getOrInsertDeclaration(
M, UseShortGranules
? Intrinsic::hwasan_check_memaccess_shortgranules
: Intrinsic::hwasan_check_memaccess),
diff --git a/llvm/lib/Transforms/Instrumentation/KCFI.cpp b/llvm/lib/Transforms/Instrumentation/KCFI.cpp
index 28dc1c02b661ac..bbe0f4c6178192 100644
--- a/llvm/lib/Transforms/Instrumentation/KCFI.cpp
+++ b/llvm/lib/Transforms/Instrumentation/KCFI.cpp
@@ -110,7 +110,8 @@ PreservedAnalyses KCFIPass::run(Function &F, FunctionAnalysisManager &AM) {
Instruction *ThenTerm =
SplitBlockAndInsertIfThen(Test, Call, false, VeryUnlikelyWeights);
Builder.SetInsertPoint(ThenTerm);
- Builder.CreateCall(Intrinsic::getDeclaration(&M, Intrinsic::debugtrap));
+ Builder.CreateCall(
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::debugtrap));
++NumKCFIChecks;
}
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 07d667434e0710..19ec97c17f31c6 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2853,7 +2853,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *S2Conv =
IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)), S2->getType());
Value *V2 = I.getOperand(2);
- Function *Intrin = Intrinsic::getDeclaration(
+ Function *Intrin = Intrinsic::getOrInsertDeclaration(
I.getModule(), I.getIntrinsicID(), S2Conv->getType());
Value *Shift = IRB.CreateCall(Intrin, {S0, S1, V2});
setShadow(&I, IRB.CreateOr(Shift, S2Conv));
@@ -3057,7 +3057,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
Value *Op = I.getArgOperand(0);
Type *OpType = Op->getType();
- Function *BswapFunc = Intrinsic::getDeclaration(
+ Function *BswapFunc = Intrinsic::getOrInsertDeclaration(
F.getParent(), Intrinsic::bswap, ArrayRef(&OpType, 1));
setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
setOrigin(&I, getOrigin(Op));
@@ -3287,7 +3287,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
S2_ext = IRB.CreateBitCast(S2_ext, getMMXVectorTy(64));
}
- Function *ShadowFn = Intrinsic::getDeclaration(
+ Function *ShadowFn = Intrinsic::getOrInsertDeclaration(
F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
Value *S =
diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index 10442fa0bb9003..e6e474ed376069 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -919,7 +919,7 @@ void FunctionInstrumenter::instrument() {
// llvm.instrprof.cover(i8* <name>, i64 <hash>, i32 <num-counters>,
// i32 <index>)
Builder.CreateCall(
- Intrinsic::getDeclaration(&M, Intrinsic::instrprof_cover),
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::instrprof_cover),
{NormalizedNamePtr, CFGHash, Builder.getInt32(1), Builder.getInt32(0)});
return;
}
@@ -931,7 +931,7 @@ void FunctionInstrumenter::instrument() {
if (IsCtxProf) {
auto *CSIntrinsic =
- Intrinsic::getDeclaration(&M, Intrinsic::instrprof_callsite);
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::instrprof_callsite);
// We want to count the instrumentable callsites, then instrument them. This
// is because the llvm.instrprof.callsite intrinsic has an argument (like
// the other instrprof intrinsics) capturing the total number of
@@ -972,7 +972,7 @@ void FunctionInstrumenter::instrument() {
// llvm.instrprof.timestamp(i8* <name>, i64 <hash>, i32 <num-counters>,
// i32 <index>)
Builder.CreateCall(
- Intrinsic::getDeclaration(&M, Intrinsic::instrprof_timestamp),
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::instrprof_timestamp),
{NormalizedNamePtr, CFGHash, Builder.getInt32(NumCounters),
Builder.getInt32(I)});
I += PGOBlockCoverage ? 8 : 1;
@@ -984,12 +984,12 @@ void FunctionInstrumenter::instrument() {
"Cannot get the Instrumentation point");
// llvm.instrprof.increment(i8* <name>, i64 <hash>, i32 <num-counters>,
// i32 <index>)
- Builder.CreateCall(
- Intrinsic::getDeclaration(&M, PGOBlockCoverage
- ? Intrinsic::instrprof_cover
- : Intrinsic::instrprof_increment),
- {NormalizedNamePtr, CFGHash, Builder.getInt32(NumCounters),
- Builder.getInt32(I++)});
+ Builder.CreateCall(Intrinsic::getOrInsertDeclaration(
+ &M, PGOBlockCoverage
+ ? Intrinsic::instrprof_cover
+ : Intrinsic::instrprof_increment),
+ {NormalizedNamePtr, CFGHash,
+ Builder.getInt32(NumCounters), Builder.getInt32(I++)});
}
// Now instrument select instructions:
@@ -1038,7 +1038,8 @@ void FunctionInstrumenter::instrument() {
SmallVector<OperandBundleDef, 1> OpBundles;
populateEHOperandBundle(Cand, BlockColors, OpBundles);
Builder.CreateCall(
- Intrinsic::getDeclaration(&M, Intrinsic::instrprof_value_profile),
+ Intrinsic::getOrInsertDeclaration(&M,
+ Intrinsic::instrprof_value_profile),
{NormalizedNamePtr, Builder.getInt64(FuncInfo.FunctionHash),
ToProfile, Builder.getInt32(Kind), Builder.getInt32(SiteIndex++)},
OpBundles);
@@ -1726,7 +1727,7 @@ void SelectInstVisitor::instrumentOneSelectInst(SelectInst &SI) {
ConstantExpr::getPointerBitCastOrAddrSpaceCast(
FuncNameVar, PointerType::get(M->getContext(), 0));
Builder.CreateCall(
- Intrinsic::getDeclaration(M, Intrinsic::instrprof_increment_step),
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::instrprof_increment_step),
{NormalizedFuncNameVarPtr, Builder.getInt64(FuncHash),
Builder.getInt32(TotalNumCtrs), Builder.getInt32(*CurCtrIdx), Step});
++(*CurCtrIdx);
diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index db4bf709c9cc9c..719806fdf37f58 100644
--- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -999,7 +999,7 @@ void ModuleSanitizerCoverage::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
if (Options.StackDepth && IsEntryBB && !IsLeafFunc) {
// Check stack depth. If it's the deepest so far, record it.
Module *M = F.getParent();
- Function *GetFrameAddr = Intrinsic::getDeclaration(
+ Function *GetFrameAddr = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::frameaddress,
IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
auto FrameAddrPtr =
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 68cf4e55301314..388addfab181a4 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -571,9 +571,10 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
// Instrument function entry/exit points if there were instrumented accesses.
if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
InstrumentationIRBuilder IRB(F.getEntryBlock().getFirstNonPHI());
- Value *ReturnAddress = IRB.CreateCall(
- Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
- IRB.getInt32(0));
+ Value *ReturnAddress =
+ IRB.CreateCall(Intrinsic::getOrInsertDeclaration(
+ F.getParent(), Intrinsic::returnaddress),
+ IRB.getInt32(0));
IRB.CreateCall(TsanFuncEntry, ReturnAddress);
EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
diff --git a/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h b/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
index c11691c613ac78..0dedd0207571bf 100644
--- a/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
+++ b/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
@@ -139,7 +139,7 @@ class ARCRuntimeEntryPoints {
if (Decl)
return Decl;
- return Decl = Intrinsic::getDeclaration(TheModule, IntID);
+ return Decl = Intrinsic::getOrInsertDeclaration(TheModule, IntID);
}
};
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 60fd2a286119b3..9317e0643079ea 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -380,7 +380,8 @@ bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
case Intrinsic::masked_load: {
Type *DestTy = II->getType();
Type *SrcTy = NewV->getType();
- Function *NewDecl = Intrinsic::getDeclaration(M, IID, {DestTy, SrcTy});
+ Function *NewDecl =
+ Intrinsic::getOrInsertDeclaration(M, IID, {DestTy, SrcTy});
II->setArgOperand(0, NewV);
II->setCalledFunction(NewDecl);
return true;
@@ -391,7 +392,8 @@ bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
case Intrinsic::masked_gather: {
Type *RetTy = II->getType();
Type *NewPtrTy = NewV->getType();
- Function *NewDecl = Intrinsic::getDeclaration(M, IID, {RetTy, NewPtrTy});
+ Function *NewDecl =
+ Intrinsic::getOrInsertDeclaration(M, IID, {RetTy, NewPtrTy});
II->setArgOperand(0, NewV);
II->setCalledFunction(NewDecl);
return true;
@@ -400,16 +402,16 @@ bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
case Intrinsic::masked_scatter: {
Type *ValueTy = II->getOperand(0)->getType();
Type *NewPtrTy = NewV->getType();
- Function *NewDecl =
- Intrinsic::getDeclaration(M, II->getIntrinsicID(), {ValueTy, NewPtrTy});
+ Function *NewDecl = Intrinsic::getOrInsertDeclaration(
+ M, II->getIntrinsicID(), {ValueTy, NewPtrTy});
II->setArgOperand(1, NewV);
II->setCalledFunction(NewDecl);
return true;
}
case Intrinsic::prefetch:
case Intrinsic::is_constant: {
- Function *NewDecl =
- Intrinsic::getDeclaration(M, II->getIntrinsicID(), {NewV->getType()});
+ Function *NewDecl = Intrinsic::getOrInsertDeclaration(
+ M, II->getIntrinsicID(), {NewV->getType()});
II->setArgOperand(0, NewV);
II->setCalledFunction(NewDecl);
return true;
diff --git a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
index d85166e518f1db..4043c0e9a7ddc4 100644
--- a/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopDataPrefetch.cpp
@@ -405,7 +405,7 @@ bool LoopDataPrefetch::runOnLoop(Loop *L) {
IRBuilder<> Builder(P.InsertPt);
Module *M = BB->getParent()->getParent();
Type *I32 = Type::getInt32Ty(BB->getContext());
- Function *PrefetchFunc = Intrinsic::getDeclaration(
+ Function *PrefetchFunc = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::prefetch, PrefPtrValue->getType());
Builder.CreateCall(
PrefetchFunc,
diff --git a/llvm/lib/Transforms/Scalar/LoopFlatten.cpp b/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
index d5e91d3c1decf8..30369ed7c245cf 100644
--- a/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFlatten.cpp
@@ -978,8 +978,8 @@ static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
assert(match(Br->getCondition(), m_Zero()) &&
"Expected branch condition to be false");
IRBuilder<> Builder(Br);
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::umul_with_overflow,
- FI.OuterTripCount->getType());
+ Function *F = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::umul_with_overflow, FI.OuterTripCount->getType());
Value *Call = Builder.CreateCall(F, {FI.OuterTripCount, FI.InnerTripCount},
"flatten.mul");
FI.NewTripCount = Builder.CreateExtractValue(Call, 0, "flatten.tripcount");
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 740e1e39b9ee77..56006d9ae6924a 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -2122,7 +2122,7 @@ static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
Type *Tys[] = {Val->getType()};
Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
- Function *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
+ Function *Func = Intrinsic::getOrInsertDeclaration(M, Intrinsic::ctpop, Tys);
CallInst *CI = IRBuilder.CreateCall(Func, Ops);
CI->setDebugLoc(DL);
@@ -2136,7 +2136,7 @@ static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
Type *Tys[] = {Val->getType()};
Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
- Function *Func = Intrinsic::getDeclaration(M, IID, Tys);
+ Function *Func = Intrinsic::getOrInsertDeclaration(M, IID, Tys);
CallInst *CI = IRBuilder.CreateCall(Func, Ops);
CI->setDebugLoc(DL);
diff --git a/llvm/lib/Transforms/Scalar/LowerGuardIntrinsic.cpp b/llvm/lib/Transforms/Scalar/LowerGuardIntrinsic.cpp
index a59ecdda1746f9..ce35349376c483 100644
--- a/llvm/lib/Transforms/Scalar/LowerGuardIntrinsic.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerGuardIntrinsic.cpp
@@ -44,7 +44,7 @@ static bool lowerGuardIntrinsic(Function &F) {
if (ToLower.empty())
return false;
- auto *DeoptIntrinsic = Intrinsic::getDeclaration(
+ auto *DeoptIntrinsic = Intrinsic::getOrInsertDeclaration(
F.getParent(), Intrinsic::experimental_deoptimize, {F.getReturnType()});
DeoptIntrinsic->setCallingConv(GuardDecl->getCallingConv());
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index 0d98e844cf91ea..a4ab288b1bfee8 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -1290,7 +1290,7 @@ class LowerMatrixIntrinsics {
if (AllowContraction) {
// Use fmuladd for floating point operations and let the backend decide
// if that's profitable.
- Function *FMulAdd = Intrinsic::getDeclaration(
+ Function *FMulAdd = Intrinsic::getOrInsertDeclaration(
Func.getParent(), Intrinsic::fmuladd, A->getType());
return Builder.CreateCall(FMulAdd, {A, B, Sum});
}
diff --git a/llvm/lib/Transforms/Scalar/MakeGuardsExplicit.cpp b/llvm/lib/Transforms/Scalar/MakeGuardsExplicit.cpp
index aea17aa82a88a4..b9f88ba4e0780e 100644
--- a/llvm/lib/Transforms/Scalar/MakeGuardsExplicit.cpp
+++ b/llvm/lib/Transforms/Scalar/MakeGuardsExplicit.cpp
@@ -69,7 +69,7 @@ static bool explicifyGuards(Function &F) {
if (GuardIntrinsics.empty())
return false;
- auto *DeoptIntrinsic = Intrinsic::getDeclaration(
+ auto *DeoptIntrinsic = Intrinsic::getOrInsertDeclaration(
F.getParent(), Intrinsic::experimental_deoptimize, {F.getReturnType()});
DeoptIntrinsic->setCallingConv(GuardDecl->getCallingConv());
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index b568811dcdbcac..557a75e8946dc3 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -1855,8 +1855,8 @@ bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
// If not, then we know we can transform this.
Type *ArgTys[3] = {M->getRawDest()->getType(), M->getRawSource()->getType(),
M->getLength()->getType()};
- M->setCalledFunction(
- Intrinsic::getDeclaration(M->getModule(), Intrinsic::memcpy, ArgTys));
+ M->setCalledFunction(Intrinsic::getOrInsertDeclaration(
+ M->getModule(), Intrinsic::memcpy, ArgTys));
// For MemorySSA nothing really changes (except that memcpy may imply stricter
// aliasing guarantees).
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index e3c12c971b9ab0..daf8fa28a71e59 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1525,8 +1525,8 @@ static void CreateGCRelocates(ArrayRef<Value *> LiveVariables,
if (auto *VT = dyn_cast<VectorType>(Ty))
NewTy = FixedVectorType::get(NewTy,
cast<FixedVectorType>(VT)->getNumElements());
- return Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate,
- {NewTy});
+ return Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::experimental_gc_relocate, {NewTy});
};
// Lazily populated map from input types to the canonicalized form mentioned
diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index 72728c0f839e5d..b1e4c7e52d99a0 100644
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -733,7 +733,8 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) {
ValueVector Res(VS->NumFragments);
ValueVector ScalarCallOps(NumArgs);
- Function *NewIntrin = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
+ Function *NewIntrin =
+ Intrinsic::getOrInsertDeclaration(F->getParent(), ID, Tys);
IRBuilder<> Builder(&CI);
// Perform actual scalarization, taking care to preserve any scalar operands.
@@ -756,7 +757,7 @@ bool ScalarizerVisitor::splitCall(CallInst &CI) {
}
if (IsRemainder)
- NewIntrin = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
+ NewIntrin = Intrinsic::getOrInsertDeclaration(F->getParent(), ID, Tys);
Res[I] = Builder.CreateCall(NewIntrin, ScalarCallOps,
CI.getName() + ".i" + Twine(I));
diff --git a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
index 3cf68e07da5be2..e1dd20478fd55f 100644
--- a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
+++ b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp
@@ -225,7 +225,8 @@ struct AssumeBuilderState {
return nullptr;
if (!DebugCounter::shouldExecute(BuildAssumeCounter))
return nullptr;
- Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
+ Function *FnAssume =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::assume);
LLVMContext &C = M->getContext();
SmallVector<OperandBundleDef, 8> OpBundle;
for (auto &MapElem : AssumedKnowledgeMap) {
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index fc03643e3542cc..c6ba85bd9e57d4 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -425,8 +425,8 @@ PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
// Create intrinsic call.
LLVMContext &Ctx = NewFunc->getContext();
- Function *IFn =
- Intrinsic::getDeclaration(NewFunc->getParent(), CIID, TParams);
+ Function *IFn = Intrinsic::getOrInsertDeclaration(NewFunc->getParent(),
+ CIID, TParams);
SmallVector<Value *, 4> Args;
unsigned NumOperands = OldInst.getNumOperands();
if (isa<CallInst>(OldInst))
diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index f58448dd9562d5..a090c5ed749205 100644
--- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -1124,7 +1124,8 @@ static void insertLifetimeMarkersSurroundingCall(
TheCall->getFunction()) &&
"Input memory not defined in original function");
- Function *Func = Intrinsic::getDeclaration(M, MarkerFunc, Mem->getType());
+ Function *Func =
+ Intrinsic::getOrInsertDeclaration(M, MarkerFunc, Mem->getType());
auto Marker = CallInst::Create(Func, {NegativeOne, Mem});
if (InsertBefore)
Marker->insertBefore(TheCall);
diff --git a/llvm/lib/Transforms/Utils/EntryExitInstrumenter.cpp b/llvm/lib/Transforms/Utils/EntryExitInstrumenter.cpp
index d12c540f9a4d04..47bb31905d1ac8 100644
--- a/llvm/lib/Transforms/Utils/EntryExitInstrumenter.cpp
+++ b/llvm/lib/Transforms/Utils/EntryExitInstrumenter.cpp
@@ -63,7 +63,7 @@ static void insertCall(Function &CurFn, StringRef Func,
Func, FunctionType::get(Type::getVoidTy(C), ArgTypes, false));
Instruction *RetAddr = CallInst::Create(
- Intrinsic::getDeclaration(&M, Intrinsic::returnaddress),
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::returnaddress),
ArrayRef<Value *>(ConstantInt::get(Type::getInt32Ty(C), 0)), "",
InsertionPt);
RetAddr->setDebugLoc(DL);
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 671b0d0822a5d9..110fd6de5c6968 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -2090,7 +2090,7 @@ inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
if (IsUnsafeClaimRV) {
Builder.SetInsertPoint(II);
Function *IFn =
- Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
+ Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::objc_release);
Builder.CreateCall(IFn, RetOpnd, "");
}
II->eraseFromParent();
@@ -2125,7 +2125,8 @@ inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
// matching autoreleaseRV or an annotated call in the callee. Emit a call
// to objc_retain.
Builder.SetInsertPoint(RI);
- Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
+ Function *IFn =
+ Intrinsic::getOrInsertDeclaration(Mod, Intrinsic::objc_retain);
Builder.CreateCall(IFn, RetOpnd, "");
}
}
@@ -3021,7 +3022,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
});
} else {
SmallVector<ReturnInst *, 8> NormalReturns;
- Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
+ Function *NewDeoptIntrinsic = Intrinsic::getOrInsertDeclaration(
Caller->getParent(), Intrinsic::experimental_deoptimize,
{Caller->getReturnType()});
diff --git a/llvm/lib/Transforms/Utils/IntegerDivision.cpp b/llvm/lib/Transforms/Utils/IntegerDivision.cpp
index 11956816a6ec3f..e95a7a9ae525ac 100644
--- a/llvm/lib/Transforms/Utils/IntegerDivision.cpp
+++ b/llvm/lib/Transforms/Utils/IntegerDivision.cpp
@@ -157,8 +157,8 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
BasicBlock *IBB = Builder.GetInsertBlock();
Function *F = IBB->getParent();
- Function *CTLZ = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
- DivTy);
+ Function *CTLZ =
+ Intrinsic::getOrInsertDeclaration(F->getParent(), Intrinsic::ctlz, DivTy);
// Our CFG is going to look like:
// +---------------------+
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index f3b8623ebb0f8f..06813bac7c781f 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -4141,7 +4141,8 @@ bool llvm::recognizeBSwapOrBitReverseIdiom(
else
return false;
- Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
+ Function *F =
+ Intrinsic::getOrInsertDeclaration(I->getModule(), Intrin, DemandedTy);
Value *Provider = Res->Provider;
// We may need to truncate the provider.
diff --git a/llvm/lib/Transforms/Utils/LowerGlobalDtors.cpp b/llvm/lib/Transforms/Utils/LowerGlobalDtors.cpp
index 55f9400d93d79b..cd79600657032e 100644
--- a/llvm/lib/Transforms/Utils/LowerGlobalDtors.cpp
+++ b/llvm/lib/Transforms/Utils/LowerGlobalDtors.cpp
@@ -215,8 +215,8 @@ static bool runImpl(Module &M) {
// If `__cxa_atexit` hits out-of-memory, trap, so that we don't misbehave.
// This should be very rare, because if the process is running out of
// memory before main has even started, something is wrong.
- CallInst::Create(Intrinsic::getDeclaration(&M, Intrinsic::trap), "",
- FailBB);
+ CallInst::Create(Intrinsic::getOrInsertDeclaration(&M, Intrinsic::trap),
+ "", FailBB);
new UnreachableInst(C, FailBB);
ReturnInst::Create(C, RetBB);
diff --git a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
index 1cb1a7b396badc..77abf160dc70f9 100644
--- a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
+++ b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
@@ -268,7 +268,7 @@ bool isLifetimeIntrinsic(Value *V) {
Value *readRegister(IRBuilder<> &IRB, StringRef Name) {
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
- Function *ReadRegister = Intrinsic::getDeclaration(
+ Function *ReadRegister = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::read_register, IRB.getIntPtrTy(M->getDataLayout()));
MDNode *MD =
MDNode::get(M->getContext(), {MDString::get(M->getContext(), Name)});
@@ -287,7 +287,7 @@ Value *getPC(const Triple &TargetTriple, IRBuilder<> &IRB) {
Value *getFP(IRBuilder<> &IRB) {
Function *F = IRB.GetInsertBlock()->getParent();
Module *M = F->getParent();
- auto *GetStackPointerFn = Intrinsic::getDeclaration(
+ auto *GetStackPointerFn = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::frameaddress,
IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
return IRB.CreatePtrToInt(
@@ -301,7 +301,7 @@ Value *getAndroidSlotPtr(IRBuilder<> &IRB, int Slot) {
// Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
// in Bionic's libc/private/bionic_tls.h.
Function *ThreadPointerFunc =
- Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::thread_pointer);
return IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
IRB.CreateCall(ThreadPointerFunc), 8 * Slot);
}
diff --git a/llvm/lib/Transforms/Utils/PredicateInfo.cpp b/llvm/lib/Transforms/Utils/PredicateInfo.cpp
index 186e17e166ba3d..2415118cad6fb3 100644
--- a/llvm/lib/Transforms/Utils/PredicateInfo.cpp
+++ b/llvm/lib/Transforms/Utils/PredicateInfo.cpp
@@ -559,7 +559,7 @@ Value *PredicateInfoBuilder::materializeStack(unsigned int &Counter,
if (isa<PredicateWithEdge>(ValInfo)) {
IRBuilder<> B(getBranchTerminator(ValInfo));
auto NumDecls = F.getParent()->getNumNamedValues();
- Function *IF = Intrinsic::getDeclaration(
+ Function *IF = Intrinsic::getOrInsertDeclaration(
F.getParent(), Intrinsic::ssa_copy, Op->getType());
if (NumDecls != F.getParent()->getNumNamedValues())
PI.CreatedDeclarations.insert(IF);
@@ -575,7 +575,7 @@ Value *PredicateInfoBuilder::materializeStack(unsigned int &Counter,
// directly before it, assume(i1 true) is not a useful fact.
IRBuilder<> B(PAssume->AssumeInst->getNextNode());
auto NumDecls = F.getParent()->getNumNamedValues();
- Function *IF = Intrinsic::getDeclaration(
+ Function *IF = Intrinsic::getOrInsertDeclaration(
F.getParent(), Intrinsic::ssa_copy, Op->getType());
if (NumDecls != F.getParent()->getNumNamedValues())
PI.CreatedDeclarations.insert(IF);
diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 1b7912fdf5e304..656bb1ebd1161e 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -444,7 +444,7 @@ struct PromoteMem2Reg {
/// Given a LoadInst LI this adds assume(LI != null) after it.
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI) {
Function *AssumeIntrinsic =
- Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
+ Intrinsic::getOrInsertDeclaration(LI->getModule(), Intrinsic::assume);
ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
Constant::getNullValue(LI->getType()));
LoadNotNull->insertAfter(LI);
diff --git a/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp b/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
index 6e84965370b248..2700b4307308cb 100644
--- a/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
+++ b/llvm/lib/Transforms/Utils/RelLookupTableConverter.cpp
@@ -151,7 +151,7 @@ static void convertToRelLookupTable(GlobalVariable &LookupTable) {
// GEP might not be immediately followed by a LOAD, like it can be hoisted
// outside the loop or another instruction might be inserted them in between.
Builder.SetInsertPoint(Load);
- Function *LoadRelIntrinsic = llvm::Intrinsic::getDeclaration(
+ Function *LoadRelIntrinsic = llvm::Intrinsic::getOrInsertDeclaration(
&M, Intrinsic::load_relative, {Index->getType()});
// Create a call to load.relative intrinsic that computes the target address
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 1ff3cd78aa9877..de1864ef5b8d9b 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -2134,8 +2134,8 @@ Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
MulV = TruncTripCount;
OfMul = ConstantInt::getFalse(MulV->getContext());
} else {
- auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
- Intrinsic::umul_with_overflow, Ty);
+ auto *MulF = Intrinsic::getOrInsertDeclaration(
+ Loc->getModule(), Intrinsic::umul_with_overflow, Ty);
CallInst *Mul =
Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index e06ebb691d511c..db2acb9eed0938 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -1960,7 +1960,7 @@ static Value *optimizeDoubleFP(CallInst *CI, IRBuilderBase &B,
if (IsIntrinsic) {
Module *M = CI->getModule();
Intrinsic::ID IID = CalleeFn->getIntrinsicID();
- Function *Fn = Intrinsic::getDeclaration(M, IID, B.getFloatTy());
+ Function *Fn = Intrinsic::getOrInsertDeclaration(M, IID, B.getFloatTy());
R = isBinary ? B.CreateCall(Fn, V) : B.CreateCall(Fn, V[0]);
} else {
AttributeList CalleeAttrs = CalleeFn->getAttributes();
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index e2958c49b8ca9f..5c164075e83259 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -15079,7 +15079,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
false /*HasGlobalPred*/);
CF = VFDatabase(*CI).getVectorizedFunction(Shape);
} else {
- CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl);
+ CF = Intrinsic::getOrInsertDeclaration(F->getParent(), ID, TysForDecl);
}
SmallVector<OperandBundleDef, 1> OpBundles;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index ba94cd29587664..2948ecc580edc0 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -984,7 +984,7 @@ void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
// Use vector version of the intrinsic.
Module *M = State.Builder.GetInsertBlock()->getModule();
Function *VectorF =
- Intrinsic::getDeclaration(M, VectorIntrinsicID, TysForDecl);
+ Intrinsic::getOrInsertDeclaration(M, VectorIntrinsicID, TysForDecl);
assert(VectorF && "Can't retrieve vector intrinsic.");
auto *CI = cast_or_null<CallInst>(getUnderlyingValue());
diff --git a/llvm/tools/llvm-reduce/deltas/ReduceOpcodes.cpp b/llvm/tools/llvm-reduce/deltas/ReduceOpcodes.cpp
index fb8729c36a6f2d..0e2a6decfbc9d5 100644
--- a/llvm/tools/llvm-reduce/deltas/ReduceOpcodes.cpp
+++ b/llvm/tools/llvm-reduce/deltas/ReduceOpcodes.cpp
@@ -30,7 +30,7 @@ static bool shouldIgnoreArgument(const Value *V) {
static Value *replaceIntrinsic(Module &M, IntrinsicInst *II,
Intrinsic::ID NewIID,
ArrayRef<Type *> Tys = {}) {
- Function *NewFunc = Intrinsic::getDeclaration(&M, NewIID, Tys);
+ Function *NewFunc = Intrinsic::getOrInsertDeclaration(&M, NewIID, Tys);
II->setCalledFunction(NewFunc);
return II;
}
diff --git a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
index 6437e0c9491f7f..8ad15ca41510f2 100644
--- a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
+++ b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
@@ -430,7 +430,8 @@ static void RunRandTest(uint64_t Seed, int Size, int MinCount, int MaxCount,
BB->insertInto(F);
Instruction *Ret = ReturnInst::Create(C);
Ret->insertInto(BB, BB->begin());
- Function *FnAssume = Intrinsic::getDeclaration(Mod.get(), Intrinsic::assume);
+ Function *FnAssume =
+ Intrinsic::getOrInsertDeclaration(Mod.get(), Intrinsic::assume);
std::vector<Argument *> ShuffledArgs;
BitVector HasArg;
diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp
index 9e6c517242a269..81784bb2360975 100644
--- a/llvm/unittests/Analysis/MemorySSATest.cpp
+++ b/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -1120,7 +1120,7 @@ TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
B.CreateStore(B.getInt8(0), Bar);
auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
- return Intrinsic::getDeclaration(&M, ID, {Foo->getType()});
+ return Intrinsic::getOrInsertDeclaration(&M, ID, {Foo->getType()});
};
B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 77d966155dceff..0145ee70a14c17 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2481,8 +2481,8 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAddWithRange) {
TEST_F(ComputeKnownBitsTest, ComputeKnownBitsUnknownVScale) {
Module M("", Context);
IRBuilder<> Builder(Context);
- Function *TheFn =
- Intrinsic::getDeclaration(&M, Intrinsic::vscale, {Builder.getInt32Ty()});
+ Function *TheFn = Intrinsic::getOrInsertDeclaration(&M, Intrinsic::vscale,
+ {Builder.getInt32Ty()});
CallInst *CI = Builder.CreateCall(TheFn, {}, {}, "");
KnownBits Known = computeKnownBits(CI, M.getDataLayout(), /* Depth */ 0);
diff --git a/llvm/unittests/IR/BasicBlockTest.cpp b/llvm/unittests/IR/BasicBlockTest.cpp
index eea2746a352aa6..88ac6611742ce9 100644
--- a/llvm/unittests/IR/BasicBlockTest.cpp
+++ b/llvm/unittests/IR/BasicBlockTest.cpp
@@ -109,8 +109,10 @@ TEST(BasicBlockTest, TestInstructionsWithoutDebug) {
Argument *V = new Argument(Type::getInt32Ty(Ctx));
Function *F = Function::Create(FT, Function::ExternalLinkage, "", M);
- Function *DbgDeclare = Intrinsic::getDeclaration(M, Intrinsic::dbg_declare);
- Function *DbgValue = Intrinsic::getDeclaration(M, Intrinsic::dbg_value);
+ Function *DbgDeclare =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::dbg_declare);
+ Function *DbgValue =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::dbg_value);
Value *DIV = MetadataAsValue::get(Ctx, (Metadata *)nullptr);
SmallVector<Value *, 3> Args = {DIV, DIV, DIV};
@@ -174,7 +176,7 @@ class InstrOrderInvalidationTest : public ::testing::Test {
protected:
void SetUp() override {
M.reset(new Module("MyModule", Ctx));
- Nop = Intrinsic::getDeclaration(M.get(), Intrinsic::donothing);
+ Nop = Intrinsic::getOrInsertDeclaration(M.get(), Intrinsic::donothing);
FunctionType *FT = FunctionType::get(Type::getVoidTy(Ctx), {}, false);
Function *F = Function::Create(FT, Function::ExternalLinkage, "foo", *M);
BB = BasicBlock::Create(Ctx, "entry", F);
diff --git a/llvm/unittests/IR/DebugInfoTest.cpp b/llvm/unittests/IR/DebugInfoTest.cpp
index 953df224e84dcb..ea20c87d6b09b4 100644
--- a/llvm/unittests/IR/DebugInfoTest.cpp
+++ b/llvm/unittests/IR/DebugInfoTest.cpp
@@ -693,7 +693,8 @@ TEST(IRBuilder, GetSetInsertionPointWithEmptyBasicBlock) {
std::unique_ptr<BasicBlock> BB(BasicBlock::Create(C, "start"));
Module *M = new Module("module", C);
IRBuilder<> Builder(BB.get());
- Function *DbgDeclare = Intrinsic::getDeclaration(M, Intrinsic::dbg_declare);
+ Function *DbgDeclare =
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::dbg_declare);
Value *DIV = MetadataAsValue::get(C, (Metadata *)nullptr);
SmallVector<Value *, 3> Args = {DIV, DIV, DIV};
Builder.CreateCall(DbgDeclare, Args);
diff --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp
index d5239f21147cdb..690af62d18020d 100644
--- a/llvm/unittests/IR/IRBuilderTest.cpp
+++ b/llvm/unittests/IR/IRBuilderTest.cpp
@@ -413,8 +413,9 @@ TEST_F(IRBuilderTest, ConstrainedFPIntrinsics) {
Builder.setDefaultConstrainedExcept(fp::ebStrict);
Builder.setDefaultConstrainedRounding(RoundingMode::TowardZero);
- Function *Fn = Intrinsic::getDeclaration(M.get(),
- Intrinsic::experimental_constrained_roundeven, { Type::getDoubleTy(Ctx) });
+ Function *Fn = Intrinsic::getOrInsertDeclaration(
+ M.get(), Intrinsic::experimental_constrained_roundeven,
+ {Type::getDoubleTy(Ctx)});
V = Builder.CreateConstrainedFPCall(Fn, { VDouble });
CII = cast<ConstrainedFPIntrinsic>(V);
EXPECT_EQ(Intrinsic::experimental_constrained_roundeven, CII->getIntrinsicID());
diff --git a/llvm/unittests/IR/IntrinsicsTest.cpp b/llvm/unittests/IR/IntrinsicsTest.cpp
index 0c4af28a2ab57b..7fe0bd79b80a60 100644
--- a/llvm/unittests/IR/IntrinsicsTest.cpp
+++ b/llvm/unittests/IR/IntrinsicsTest.cpp
@@ -50,7 +50,7 @@ class IntrinsicsTest : public ::testing::Test {
Instruction *makeIntrinsic(Intrinsic::ID ID) const {
IRBuilder<> Builder(BB);
SmallVector<Value *, 4> ProcessedArgs;
- auto *Decl = Intrinsic::getDeclaration(M.get(), ID);
+ auto *Decl = Intrinsic::getOrInsertDeclaration(M.get(), ID);
for (auto *Ty : Decl->getFunctionType()->params()) {
auto *Val = Constant::getNullValue(Ty);
ProcessedArgs.push_back(Val);
diff --git a/llvm/unittests/IR/PatternMatch.cpp b/llvm/unittests/IR/PatternMatch.cpp
index 13f121a2b9c7dd..7dc4b9f448d386 100644
--- a/llvm/unittests/IR/PatternMatch.cpp
+++ b/llvm/unittests/IR/PatternMatch.cpp
@@ -1766,7 +1766,7 @@ TEST_F(PatternMatchTest, IntrinsicMatcher) {
Value *Ops[] = {Name, Hash, Num, Index, Step};
Module *M = BB->getParent()->getParent();
Function *TheFn =
- Intrinsic::getDeclaration(M, Intrinsic::instrprof_increment_step);
+ Intrinsic::getOrInsertDeclaration(M, Intrinsic::instrprof_increment_step);
Value *Intrinsic5 = CallInst::Create(TheFn, Ops, "", BB);
diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
index 925a69bafa07ef..d6ad7599ce4610 100644
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -420,7 +420,7 @@ TEST_F(VPIntrinsicTest, VPToNonPredIntrinsicRoundTrip) {
ASSERT_TRUE(IsFullTrip);
}
-/// Check that VPIntrinsic::getDeclarationForParams works.
+/// Check that VPIntrinsic::getOrInsertDeclarationForParams works.
TEST_F(VPIntrinsicTest, VPIntrinsicDeclarationForParams) {
std::unique_ptr<Module> M = createVPDeclarationModule();
assert(M);
@@ -436,7 +436,7 @@ TEST_F(VPIntrinsicTest, VPIntrinsicDeclarationForParams) {
Values.push_back(UndefValue::get(ParamTy));
ASSERT_NE(F.getIntrinsicID(), Intrinsic::not_intrinsic);
- auto *NewDecl = VPIntrinsic::getDeclarationForParams(
+ auto *NewDecl = VPIntrinsic::getOrInsertDeclarationForParams(
OutM.get(), F.getIntrinsicID(), FuncTy->getReturnType(), Values);
ASSERT_TRUE(NewDecl);
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
index 372c5aaea59382..376b00224eb574 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanTest.cpp
@@ -1195,7 +1195,8 @@ TEST(VPRecipeTest, MayHaveSideEffectsAndMayReadWriteMemory) {
// Test for a call to a function without side-effects.
LLVMContext C;
Module M("", C);
- Function *TheFn = Intrinsic::getDeclaration(&M, Intrinsic::thread_pointer);
+ Function *TheFn =
+ Intrinsic::getOrInsertDeclaration(&M, Intrinsic::thread_pointer);
auto *Call = CallInst::Create(TheFn->getFunctionType(), TheFn);
VPValue Op1;
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index 5031426033aea1..448a171cf3e412 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -347,7 +347,7 @@ class LLVM_ConstrainedIntr<string mnem, int numArgs,
[&args](unsigned index) { return args[index]->getType(); });
llvm::Module *module = builder.GetInsertBlock()->getModule();
llvm::Function *callee =
- llvm::Intrinsic::getDeclaration(module,
+ llvm::Intrinsic::getOrInsertDeclaration(module,
llvm::Intrinsic::experimental_constrained_}] #
mnem # [{, overloadedTypes); }] #
!cond(!gt(hasRoundingMode, 0) : [{
@@ -541,7 +541,7 @@ class LLVM_DbgIntrOp<string name, string argName, list<Trait> traits = []>
llvm::Module *module = builder.GetInsertBlock()->getModule();
llvm::LLVMContext &ctx = module->getContext();
llvm::Function *fn =
- llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::}]
+ llvm::Intrinsic::getOrInsertDeclaration(module, llvm::Intrinsic::}]
# !subst(".", "_", name) # [{);
builder.CreateCall(fn, {
llvm::MetadataAsValue::get(ctx,
@@ -594,7 +594,7 @@ def LLVM_DbgLabelOp : LLVM_IntrOp<"dbg.label", [], [], [], 0> {
llvm::Module *module = builder.GetInsertBlock()->getModule();
llvm::LLVMContext &ctx = module->getContext();
llvm::Function *fn =
- llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::dbg_label);
+ llvm::Intrinsic::getOrInsertDeclaration(module, llvm::Intrinsic::dbg_label);
builder.CreateCall(fn, {
llvm::MetadataAsValue::get(ctx, moduleTranslation.translateDebugInfo($label))
});
diff --git a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
index 46b7b0a473c692..a8595d14ccf2e5 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.cpp
@@ -99,7 +99,8 @@ getOverloadedDeclaration(CallIntrinsicOp op, llvm::Intrinsic::ID id,
}
ArrayRef<llvm::Type *> overloadedArgTysRef = overloadedArgTys;
- return llvm::Intrinsic::getDeclaration(module, id, overloadedArgTysRef);
+ return llvm::Intrinsic::getOrInsertDeclaration(module, id,
+ overloadedArgTysRef);
}
static llvm::OperandBundleDef
@@ -143,7 +144,7 @@ convertCallLLVMIntrinsicOp(CallIntrinsicOp op, llvm::IRBuilderBase &builder,
return failure();
fn = *fnOrFailure;
} else {
- fn = llvm::Intrinsic::getDeclaration(module, id, {});
+ fn = llvm::Intrinsic::getOrInsertDeclaration(module, id, {});
}
// Check the result type of the call.
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index a5de90160c4145..add0a31c114f8d 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -839,7 +839,8 @@ llvm::CallInst *mlir::LLVM::detail::createIntrinsicCall(
llvm::IRBuilderBase &builder, llvm::Intrinsic::ID intrinsic,
ArrayRef<llvm::Value *> args, ArrayRef<llvm::Type *> tys) {
llvm::Module *module = builder.GetInsertBlock()->getModule();
- llvm::Function *fn = llvm::Intrinsic::getDeclaration(module, intrinsic, tys);
+ llvm::Function *fn =
+ llvm::Intrinsic::getOrInsertDeclaration(module, intrinsic, tys);
return builder.CreateCall(fn, args);
}
@@ -886,8 +887,8 @@ llvm::CallInst *mlir::LLVM::detail::createIntrinsicCall(
for (unsigned overloadedOperandIdx : overloadedOperands)
overloadedTypes.push_back(args[overloadedOperandIdx]->getType());
llvm::Module *module = builder.GetInsertBlock()->getModule();
- llvm::Function *llvmIntr =
- llvm::Intrinsic::getDeclaration(module, intrinsic, overloadedTypes);
+ llvm::Function *llvmIntr = llvm::Intrinsic::getOrInsertDeclaration(
+ module, intrinsic, overloadedTypes);
return builder.CreateCall(llvmIntr, args);
}
diff --git a/polly/lib/CodeGen/IslExprBuilder.cpp b/polly/lib/CodeGen/IslExprBuilder.cpp
index aaafac14bf8065..1688c41c624b24 100644
--- a/polly/lib/CodeGen/IslExprBuilder.cpp
+++ b/polly/lib/CodeGen/IslExprBuilder.cpp
@@ -129,16 +129,16 @@ Value *IslExprBuilder::createBinOp(BinaryOperator::BinaryOps Opc, Value *LHS,
Module *M = Builder.GetInsertBlock()->getModule();
switch (Opc) {
case Instruction::Add:
- F = Intrinsic::getDeclaration(M, Intrinsic::sadd_with_overflow,
- {LHS->getType()});
+ F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::sadd_with_overflow,
+ {LHS->getType()});
break;
case Instruction::Sub:
- F = Intrinsic::getDeclaration(M, Intrinsic::ssub_with_overflow,
- {LHS->getType()});
+ F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::ssub_with_overflow,
+ {LHS->getType()});
break;
case Instruction::Mul:
- F = Intrinsic::getDeclaration(M, Intrinsic::smul_with_overflow,
- {LHS->getType()});
+ F = Intrinsic::getOrInsertDeclaration(M, Intrinsic::smul_with_overflow,
+ {LHS->getType()});
break;
default:
llvm_unreachable("No overflow intrinsic for binary operator found!");
diff --git a/polly/lib/CodeGen/PerfMonitor.cpp b/polly/lib/CodeGen/PerfMonitor.cpp
index 3cad8537f3ee19..1a791614685443 100644
--- a/polly/lib/CodeGen/PerfMonitor.cpp
+++ b/polly/lib/CodeGen/PerfMonitor.cpp
@@ -59,7 +59,7 @@ void PerfMonitor::addToGlobalConstructors(Function *Fn) {
}
Function *PerfMonitor::getRDTSCP() {
- return Intrinsic::getDeclaration(M, Intrinsic::x86_rdtscp);
+ return Intrinsic::getOrInsertDeclaration(M, Intrinsic::x86_rdtscp);
}
PerfMonitor::PerfMonitor(const Scop &S, Module *M)