[clang] [llvm] [mlir] [polly] [IR][NFC] Update IRBuilder to use InsertPosition (PR #96497)
Stephen Tozer via cfe-commits
cfe-commits at lists.llvm.org
Mon Jun 24 09:07:23 PDT 2024
https://github.com/SLTozer updated https://github.com/llvm/llvm-project/pull/96497
From 303d17dcea6bec43da2fdcf41ad8261fad7f1723 Mon Sep 17 00:00:00 2001
From: Stephen Tozer <stephen.tozer at sony.com>
Date: Fri, 21 Jun 2024 09:02:26 +0100
Subject: [PATCH 1/2] [IR] Update IRBuilder::SetInsertPoint to use InsertPosition
Uses the new InsertPosition class to simplify parts of the IRBuilder
interface, removing the need to pass a BasicBlock alongside a
BasicBlock::iterator: the parent basic block can now be obtained from the
iterator even when it points to the sentinel.
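For reference, a minimal sketch of how call sites change with this patch;
the Builder, BB, and I names below are illustrative placeholders rather
than code taken from the diff:

    // Before: the basic block had to be passed alongside the iterator.
    //   Builder.SetInsertPoint(BB, I->getIterator());
    //   Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());

    // After: InsertPosition converts implicitly from an Instruction*,
    // a BasicBlock* (meaning "insert at the end of the block"), or a
    // BasicBlock::iterator, so the parent block is recovered from the
    // position itself.
    Builder.SetInsertPoint(I->getIterator());
    Builder.SetInsertPoint(BB->getFirstInsertionPt());
    Builder.SetInsertPoint(BB); // append at the end of BB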
---
clang/lib/CodeGen/CGBlocks.cpp | 2 +-
clang/lib/CodeGen/CGGPUBuiltin.cpp | 4 +-
clang/lib/CodeGen/CGHLSLRuntime.cpp | 2 +-
clang/lib/CodeGen/CGObjC.cpp | 7 +-
clang/lib/CodeGen/CGObjCMac.cpp | 2 +-
clang/lib/CodeGen/CGOpenMPRuntime.cpp | 2 +-
clang/lib/CodeGen/CGStmt.cpp | 2 +-
clang/lib/CodeGen/CodeGenABITypes.cpp | 2 +-
clang/lib/CodeGen/CodeGenFunction.cpp | 4 +-
llvm/include/llvm/IR/IRBuilder.h | 78 ++++---------------
llvm/include/llvm/IR/Instruction.h | 8 +-
.../Utils/ScalarEvolutionExpander.h | 4 +-
llvm/lib/Analysis/MemoryBuiltins.cpp | 2 +-
llvm/lib/CodeGen/AtomicExpandPass.cpp | 8 +-
llvm/lib/CodeGen/CodeGenPrepare.cpp | 4 +-
llvm/lib/CodeGen/ExpandLargeFpConvert.cpp | 4 +-
llvm/lib/CodeGen/ExpandMemCmp.cpp | 6 +-
llvm/lib/CodeGen/ExpandVectorPredication.cpp | 2 +-
llvm/lib/CodeGen/HardwareLoops.cpp | 2 +-
llvm/lib/CodeGen/IntrinsicLowering.cpp | 2 +-
llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp | 2 +-
llvm/lib/CodeGen/SafeStack.cpp | 2 +-
llvm/lib/CodeGen/ShadowStackGCLowering.cpp | 4 +-
llvm/lib/CodeGen/SjLjEHPrepare.cpp | 5 +-
llvm/lib/CodeGen/WasmEHPrepare.cpp | 2 +-
llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 29 ++++---
llvm/lib/IR/AutoUpgrade.cpp | 4 +-
llvm/lib/IR/Core.cpp | 2 +-
llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp | 4 +-
.../Target/AMDGPU/AMDGPUAtomicOptimizer.cpp | 2 +-
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 2 +-
llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp | 2 +-
.../AMDGPU/AMDGPULowerKernelArguments.cpp | 2 +-
.../AMDGPU/AMDGPULowerModuleLDSPass.cpp | 2 +-
.../Target/AMDGPU/SIAnnotateControlFlow.cpp | 2 +-
llvm/lib/Target/ARM/ARMParallelDSP.cpp | 6 +-
.../Target/ARM/MVELaneInterleavingPass.cpp | 2 +-
llvm/lib/Target/ARM/MVETailPredication.cpp | 2 +-
.../Hexagon/HexagonLoopIdiomRecognition.cpp | 4 +-
.../Target/Hexagon/HexagonVectorCombine.cpp | 6 +-
.../Hexagon/HexagonVectorLoopCarriedReuse.cpp | 2 +-
llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp | 2 +-
llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp | 2 +-
.../WebAssemblyLowerEmscriptenEHSjLj.cpp | 2 +-
llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp | 4 +-
llvm/lib/Target/X86/X86LowerAMXType.cpp | 3 +-
llvm/lib/Target/X86/X86WinEHState.cpp | 2 +-
.../AggressiveInstCombine.cpp | 2 +-
llvm/lib/Transforms/Coroutines/CoroFrame.cpp | 12 ++-
.../Transforms/IPO/AttributorAttributes.cpp | 2 +-
llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 3 +-
.../InstCombine/InstCombineAndOrXor.cpp | 2 +-
.../InstCombine/InstCombineCompares.cpp | 4 +-
.../Transforms/InstCombine/InstCombinePHI.cpp | 4 +-
.../InstCombine/InstCombineSelect.cpp | 2 +-
.../InstCombine/InstCombineVectorOps.cpp | 2 +-
.../Instrumentation/AddressSanitizer.cpp | 5 +-
.../Instrumentation/BoundsChecking.cpp | 4 +-
.../Instrumentation/DataFlowSanitizer.cpp | 44 +++++------
.../Instrumentation/GCOVProfiling.cpp | 2 +-
.../Instrumentation/HWAddressSanitizer.cpp | 2 +-
.../Instrumentation/MemProfiler.cpp | 2 +-
.../Instrumentation/PGOInstrumentation.cpp | 6 +-
llvm/lib/Transforms/ObjCARC/ObjCARC.cpp | 2 +-
.../Scalar/ConstraintElimination.cpp | 2 +-
llvm/lib/Transforms/Scalar/GuardWidening.cpp | 2 +-
llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp | 2 +-
.../Transforms/Scalar/LoopIdiomRecognize.cpp | 6 +-
.../lib/Transforms/Scalar/LoopPredication.cpp | 2 +-
.../Scalar/LowerMatrixIntrinsics.cpp | 6 +-
.../Scalar/PartiallyInlineLibCalls.cpp | 2 +-
.../Scalar/RewriteStatepointsForGC.cpp | 4 +-
llvm/lib/Transforms/Scalar/SROA.cpp | 5 +-
.../Scalar/ScalarizeMaskedMemIntrin.cpp | 14 ++--
llvm/lib/Transforms/Scalar/Scalarizer.cpp | 28 ++++---
.../lib/Transforms/Utils/AMDGPUEmitPrintf.cpp | 4 +-
.../Transforms/Utils/BypassSlowDivision.cpp | 10 +--
.../Transforms/Utils/CallPromotionUtils.cpp | 2 +-
llvm/lib/Transforms/Utils/FlattenCFG.cpp | 3 +-
llvm/lib/Transforms/Utils/InlineFunction.cpp | 8 +-
llvm/lib/Transforms/Utils/IntegerDivision.cpp | 2 +-
llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp | 2 +-
.../Utils/ScalarEvolutionExpander.cpp | 9 +--
llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 2 +-
llvm/lib/Transforms/Utils/SimplifyIndVar.cpp | 5 +-
.../lib/Transforms/Utils/SimplifyLibCalls.cpp | 4 +-
.../Vectorize/LoopIdiomVectorize.cpp | 2 +-
.../Transforms/Vectorize/LoopVectorize.cpp | 8 +-
.../Transforms/Vectorize/SLPVectorizer.cpp | 30 +++----
llvm/unittests/Analysis/MemorySSATest.cpp | 24 +++---
.../Frontend/OpenMPIRBuilderTest.cpp | 16 ++--
llvm/unittests/IR/BasicBlockTest.cpp | 2 +-
llvm/unittests/IR/DebugInfoTest.cpp | 2 +-
llvm/unittests/IR/IRBuilderTest.cpp | 6 +-
.../Transforms/Utils/SSAUpdaterBulkTest.cpp | 2 +-
.../OpenMP/OpenMPToLLVMIRTranslation.cpp | 5 +-
polly/lib/CodeGen/BlockGenerators.cpp | 4 +-
97 files changed, 249 insertions(+), 326 deletions(-)
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 5dac1cd425bf6..8b1de12cc0ad7 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1545,7 +1545,7 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
entry_ptr = entry_ptr->getNextNonDebugInstruction()->getIterator();
else
entry_ptr = entry->end();
- Builder.SetInsertPoint(entry, entry_ptr);
+ Builder.SetInsertPoint(entry_ptr);
// Emit debug information for all the DeclRefExprs.
// FIXME: also for 'this'
diff --git a/clang/lib/CodeGen/CGGPUBuiltin.cpp b/clang/lib/CodeGen/CGGPUBuiltin.cpp
index bd95541647bcf..a0d5768b62f2a 100644
--- a/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -202,13 +202,13 @@ RValue CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E) {
Args.push_back(Arg);
}
- llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
+ llvm::IRBuilder<> IRB(Builder.GetInsertPoint());
IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
bool isBuffered = (CGM.getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
clang::TargetOptions::AMDGPUPrintfKind::Buffered);
auto Printf = llvm::emitAMDGPUPrintfCall(IRB, Args, isBuffered);
- Builder.SetInsertPoint(IRB.GetInsertBlock(), IRB.GetInsertPoint());
+ Builder.SetInsertPoint(IRB.GetInsertPoint());
return RValue::get(Printf);
}
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index 55ba21ae2ba69..c9f7006b19d15 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -436,7 +436,7 @@ void CGHLSLRuntime::generateGlobalCtorDtorCalls() {
for (auto &F : M.functions()) {
if (!F.hasFnAttribute("hlsl.shader"))
continue;
- IRBuilder<> B(&F.getEntryBlock(), F.getEntryBlock().begin());
+ IRBuilder<> B(F.getEntryBlock().begin());
for (auto *Fn : CtorFns)
B.CreateCall(FunctionCallee(Fn));
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index 281b2d9795f6c..12b5412ddb672 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -2970,13 +2970,12 @@ static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
value = doFallback(CGF, value);
} else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
// Place the retain immediately following the call.
- CGF.Builder.SetInsertPoint(call->getParent(),
- ++llvm::BasicBlock::iterator(call));
+ CGF.Builder.SetInsertPoint(++llvm::BasicBlock::iterator(call));
value = doAfterCall(CGF, value);
} else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
// Place the retain at the beginning of the normal destination block.
llvm::BasicBlock *BB = invoke->getNormalDest();
- CGF.Builder.SetInsertPoint(BB, BB->begin());
+ CGF.Builder.SetInsertPoint(BB->begin());
value = doAfterCall(CGF, value);
// Bitcasts can arise because of related-result returns. Rewrite
@@ -2984,7 +2983,7 @@ static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
} else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
// Change the insert point to avoid emitting the fall-back call after the
// bitcast.
- CGF.Builder.SetInsertPoint(bitcast->getParent(), bitcast->getIterator());
+ CGF.Builder.SetInsertPoint(bitcast->getIterator());
llvm::Value *operand = bitcast->getOperand(0);
operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
bitcast->setOperand(0, operand);
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 30f3911a8b03c..b7debc8fb469d 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -4417,7 +4417,7 @@ void FragileHazards::emitHazardsInNewBlocks() {
// call. If the call throws, then this is sufficient to
// guarantee correctness as long as it doesn't also write to any
// locals.
- Builder.SetInsertPoint(&BB, BI);
+ Builder.SetInsertPoint(BI);
emitReadHazard(Builder);
}
}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index f6d12d46cfc07..532c40841f20f 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1447,7 +1447,7 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
if (!Elem.second.ServiceInsertPt)
setLocThreadIdInsertPt(CGF);
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
- CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
+ CGF.Builder.SetInsertPoint(&*Elem.second.ServiceInsertPt);
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
llvm::CallInst *Call = CGF.Builder.CreateCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 39222c0e65353..1a2a76ec2dd85 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -3076,7 +3076,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (IsGCCAsmGoto && !CBRRegResults.empty()) {
for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
- Builder.SetInsertPoint(Succ, --(Succ->end()));
+ Builder.SetInsertPoint(--(Succ->end()));
EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
ResultTypeRequiresCast, ResultRegIsFlagReg);
diff --git a/clang/lib/CodeGen/CodeGenABITypes.cpp b/clang/lib/CodeGen/CodeGenABITypes.cpp
index a6073e1188d6f..4c2e9b8233057 100644
--- a/clang/lib/CodeGen/CodeGenABITypes.cpp
+++ b/clang/lib/CodeGen/CodeGenABITypes.cpp
@@ -123,7 +123,7 @@ llvm::Value *CodeGen::getCXXDestructorImplicitParam(
CGF.CurCodeDecl = D;
CGF.CurFuncDecl = D;
CGF.CurFn = InsertBlock->getParent();
- CGF.Builder.SetInsertPoint(InsertBlock, InsertPoint);
+ CGF.Builder.SetInsertPoint(InsertPoint);
return CGM.getCXXABI().getCXXDestructorImplicitParam(
CGF, D, Type, ForVirtualBase, Delegating);
}
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 200c40da8bc43..3f8c900a00455 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2759,7 +2759,7 @@ void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
if (!CGM.getCodeGenOpts().SanitizeStats)
return;
- llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
+ llvm::IRBuilder<> IRB(Builder.GetInsertPoint());
IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
CGM.getSanStats().create(IRB, SSK);
}
@@ -2878,7 +2878,7 @@ void CodeGenFunction::EmitAArch64MultiVersionResolver(
}
if (!AArch64CpuInitialized) {
- Builder.SetInsertPoint(CurBlock, CurBlock->begin());
+ Builder.SetInsertPoint(CurBlock->begin());
EmitAArch64CpuInit();
AArch64CpuInitialized = true;
Builder.SetInsertPoint(CurBlock);
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index c10ea33a4ee13..fc660fda8aaf4 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -173,37 +173,13 @@ class IRBuilderBase {
BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
LLVMContext &getContext() const { return Context; }
- /// This specifies that created instructions should be appended to the
- /// end of the specified block.
- void SetInsertPoint(BasicBlock *TheBB) {
- BB = TheBB;
- InsertPt = BB->end();
- }
-
- /// This specifies that created instructions should be inserted before
- /// the specified instruction.
- void SetInsertPoint(Instruction *I) {
- BB = I->getParent();
- InsertPt = I->getIterator();
- assert(InsertPt != BB->end() && "Can't read debug loc from end()");
- SetCurrentDebugLocation(I->getStableDebugLoc());
- }
-
/// This specifies that created instructions should be inserted at the
- /// specified point.
- void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
- BB = TheBB;
- InsertPt = IP;
- if (IP != TheBB->end())
- SetCurrentDebugLocation(IP->getStableDebugLoc());
- }
-
- /// This specifies that created instructions should be inserted at
- /// the specified point, but also requires that \p IP is dereferencable.
- void SetInsertPoint(BasicBlock::iterator IP) {
- BB = IP->getParent();
+ /// specified insert position.
+ void SetInsertPoint(InsertPosition IP) {
+ BB = IP.getBasicBlock();
InsertPt = IP;
- SetCurrentDebugLocation(IP->getStableDebugLoc());
+ if (InsertPt != BB->end())
+ SetCurrentDebugLocation(InsertPt->getStableDebugLoc());
}
/// This specifies that created instructions should inserted at the beginning
@@ -286,7 +262,7 @@ class IRBuilderBase {
/// Sets the current insert point to a previously-saved location.
void restoreIP(InsertPoint IP) {
if (IP.isSet())
- SetInsertPoint(IP.getBlock(), IP.getPoint());
+ SetInsertPoint(IP.getPoint());
else
ClearInsertionPoint();
}
@@ -2677,44 +2653,20 @@ class IRBuilder : public IRBuilderBase {
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
: IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
- explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
- MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
- : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
- FPMathTag, OpBundles),
- Folder(Folder) {
- SetInsertPoint(TheBB);
- }
-
- explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
+ explicit IRBuilder(InsertPosition IP, MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
- : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
- FPMathTag, OpBundles) {
- SetInsertPoint(TheBB);
- }
-
- explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
- : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter, FPMathTag,
- OpBundles) {
+ : IRBuilderBase(IP.getBasicBlock()->getContext(), this->Folder,
+ this->Inserter, FPMathTag, OpBundles) {
SetInsertPoint(IP);
}
- IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
- MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
- : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
- FPMathTag, OpBundles),
+ explicit IRBuilder(InsertPosition IP, FolderTy Folder,
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
+ : IRBuilderBase(IP.getBasicBlock()->getContext(), this->Folder,
+ this->Inserter, FPMathTag, OpBundles),
Folder(Folder) {
- SetInsertPoint(TheBB, IP);
- }
-
- IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
- MDNode *FPMathTag = nullptr,
- ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
- : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
- FPMathTag, OpBundles) {
- SetInsertPoint(TheBB, IP);
+ SetInsertPoint(IP);
}
/// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
index 2e72f6742a659..c315a41ebcf8b 100644
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -44,6 +44,12 @@ template <> struct ilist_alloc_traits<Instruction> {
iterator_range<simple_ilist<DbgRecord>::iterator>
getDbgRecordRange(DbgMarker *);
+/// Class used to generate an insert position (ultimately always a
+/// BasicBlock::iterator, which it will implicitly convert to) from either:
+/// - An Instruction, inserting immediately prior.
+/// - A BasicBlock, inserting at the end.
+/// - An iterator, inserting at its position.
+/// - Any nullptr value, giving a blank iterator (not valid for insertion).
class InsertPosition {
using InstListType = SymbolTableList<Instruction, ilist_iterator_bits<true>,
ilist_parent<BasicBlock>>;
@@ -51,8 +57,6 @@ class InsertPosition {
public:
InsertPosition(std::nullptr_t) : InsertAt() {}
- // LLVM_DEPRECATED("Use BasicBlock::iterators for insertion instead",
- // "BasicBlock::iterator")
InsertPosition(Instruction *InsertBefore);
InsertPosition(BasicBlock *InsertAtEnd);
InsertPosition(InstListType::iterator InsertAt) : InsertAt(InsertAt) {}
diff --git a/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h b/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
index 62c1e15a9a60e..e7a1ab08ed75d 100644
--- a/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
+++ b/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
@@ -376,9 +376,7 @@ class SCEVExpander : public SCEVVisitor<SCEVExpander, Value *> {
Builder.SetInsertPoint(IP);
}
- void setInsertPoint(BasicBlock::iterator IP) {
- Builder.SetInsertPoint(IP->getParent(), IP);
- }
+ void setInsertPoint(BasicBlock::iterator IP) { Builder.SetInsertPoint(IP); }
/// Clear the current insertion point. This is useful if the instruction
/// that had been serving as the insertion point may have been deleted.
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 8ca15434833d9..5b12024da0e11 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -1217,7 +1217,7 @@ SizeOffsetValue ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
// Compute offset/size for each PHI incoming pointer.
for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
BasicBlock *IncomingBlock = PHI.getIncomingBlock(i);
- Builder.SetInsertPoint(IncomingBlock, IncomingBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(IncomingBlock->getFirstInsertionPt());
SizeOffsetValue EdgeData = compute_(PHI.getIncomingValue(i));
if (!EdgeData.bothKnown()) {
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 7728cc50fc9f9..9f6552c5dfc1d 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -1242,7 +1242,7 @@ Value *AtomicExpandImpl::insertRMWLLSCLoop(
StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
- Builder.SetInsertPoint(ExitBB, ExitBB->begin());
+ Builder.SetInsertPoint(ExitBB->begin());
return Loaded;
}
@@ -1478,7 +1478,7 @@ bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// succeeded or not. We expose this to later passes by converting any
// subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
// PHI.
- Builder.SetInsertPoint(ExitBB, ExitBB->begin());
+ Builder.SetInsertPoint(ExitBB->begin());
PHINode *LoadedExit =
Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
@@ -1491,7 +1491,7 @@ bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// a type wider than the one in the cmpxchg instruction.
Value *LoadedFull = LoadedExit;
- Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
+ Builder.SetInsertPoint(std::next(Success->getIterator()));
Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);
// Look for any users of the cmpxchg that are just comparing the loaded value
@@ -1616,7 +1616,7 @@ Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
Builder.CreateCondBr(Success, ExitBB, LoopBB);
- Builder.SetInsertPoint(ExitBB, ExitBB->begin());
+ Builder.SetInsertPoint(ExitBB->begin());
return NewLoaded;
}
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index f8fdba2e35dd7..a6e101c32d7bf 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2355,7 +2355,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,
// Create a PHI in the end block to select either the output of the intrinsic
// or the bit width of the operand.
- Builder.SetInsertPoint(EndBlock, EndBlock->begin());
+ Builder.SetInsertPoint(EndBlock->begin());
PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
@@ -6306,7 +6306,7 @@ bool CodeGenPrepare::splitLargeGEPOffsets() {
NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
}
- IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
+ IRBuilder<> NewBaseBuilder(NewBaseInsertPt);
// Create a new base.
Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
NewBaseGEP = OldBase;
diff --git a/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp b/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
index 11f123aa5bed8..2c05f01726770 100644
--- a/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
+++ b/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
@@ -212,7 +212,7 @@ static void expandFPToI(Instruction *FPToI) {
Builder.CreateBr(End);
// cleanup:
- Builder.SetInsertPoint(End, End->begin());
+ Builder.SetInsertPoint(End->begin());
PHINode *Retval0 = Builder.CreatePHI(FPToI->getType(), 4);
Retval0->addIncoming(Cond8, IfThen5);
@@ -560,7 +560,7 @@ static void expandIToFP(Instruction *IToFP) {
Builder.CreateBr(End);
// return:
- Builder.SetInsertPoint(End, End->begin());
+ Builder.SetInsertPoint(End->begin());
PHINode *Retval0 = Builder.CreatePHI(IToFP->getType(), 2);
Retval0->addIncoming(A4, IfEnd26);
Retval0->addIncoming(ConstantFP::getZero(IToFP->getType(), false), Entry);
diff --git a/llvm/lib/CodeGen/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp
index bb84813569f4d..3e59834e4f1c8 100644
--- a/llvm/lib/CodeGen/ExpandMemCmp.cpp
+++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp
@@ -574,7 +574,7 @@ void MemCmpExpansion::emitMemCmpResultBlock() {
// need to be calculated and can simply return 1.
if (IsUsedForZeroCmp) {
BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
- Builder.SetInsertPoint(ResBlock.BB, InsertPt);
+ Builder.SetInsertPoint(InsertPt);
Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
PhiRes->addIncoming(Res, ResBlock.BB);
BranchInst *NewBr = BranchInst::Create(EndBlock);
@@ -584,7 +584,7 @@ void MemCmpExpansion::emitMemCmpResultBlock() {
return;
}
BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
- Builder.SetInsertPoint(ResBlock.BB, InsertPt);
+ Builder.SetInsertPoint(InsertPt);
Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
ResBlock.PhiSrc2);
@@ -611,7 +611,7 @@ void MemCmpExpansion::setupResultBlockPHINodes() {
}
void MemCmpExpansion::setupEndBlockPHINodes() {
- Builder.SetInsertPoint(EndBlock, EndBlock->begin());
+ Builder.SetInsertPoint(EndBlock->begin());
PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
}
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index dc35f33a3a059..a63a8681f5140 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -667,7 +667,7 @@ void CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
auto *M = VPI.getModule();
Function *VScaleFunc =
Intrinsic::getDeclaration(M, Intrinsic::vscale, Int32Ty);
- IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
+ IRBuilder<> Builder(VPI.getIterator());
Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
Value *VScale = Builder.CreateCall(VScaleFunc, {}, "vscale");
MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
diff --git a/llvm/lib/CodeGen/HardwareLoops.cpp b/llvm/lib/CodeGen/HardwareLoops.cpp
index cc5aad14e1b56..200b772f88b8b 100644
--- a/llvm/lib/CodeGen/HardwareLoops.cpp
+++ b/llvm/lib/CodeGen/HardwareLoops.cpp
@@ -580,7 +580,7 @@ PHINode* HardwareLoop::InsertPHICounter(Value *NumElts, Value *EltsRem) {
BasicBlock *Preheader = L->getLoopPreheader();
BasicBlock *Header = L->getHeader();
BasicBlock *Latch = ExitBranch->getParent();
- IRBuilder<> Builder(Header, Header->getFirstNonPHIIt());
+ IRBuilder<> Builder(Header->getFirstNonPHIIt());
PHINode *Index = Builder.CreatePHI(NumElts->getType(), 2);
Index->addIncoming(NumElts, Preheader);
Index->addIncoming(EltsRem, Latch);
diff --git a/llvm/lib/CodeGen/IntrinsicLowering.cpp b/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 45fba4341ad00..65262cf4f8f9e 100644
--- a/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -40,7 +40,7 @@ static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
FunctionCallee FCache =
M->getOrInsertFunction(NewFn, FunctionType::get(RetTy, ParamTys, false));
- IRBuilder<> Builder(CI->getParent(), CI->getIterator());
+ IRBuilder<> Builder(CI->getIterator());
SmallVector<Value *, 8> Args(ArgBegin, ArgEnd);
CallInst *NewCI = Builder.CreateCall(FCache, Args);
NewCI->setName(CI->getName());
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 0777acf633187..34711e7b938ce 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -144,7 +144,7 @@ static bool lowerObjCCall(Function &F, const char *NewFn,
auto *CI = cast<CallInst>(CB);
assert(CI->getCalledFunction() && "Cannot lower an indirect call!");
- IRBuilder<> Builder(CI->getParent(), CI->getIterator());
+ IRBuilder<> Builder(CI->getIterator());
SmallVector<Value *, 8> Args(CI->args());
SmallVector<llvm::OperandBundleDef, 1> BundleList;
CI->getOperandBundlesAsDefs(BundleList);
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
index 0a26247a4d165..f695420dc207e 100644
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -784,7 +784,7 @@ bool SafeStack::run() {
if (!StackRestorePoints.empty())
++NumUnsafeStackRestorePointsFunctions;
- IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
+ IRBuilder<> IRB(F.begin()->getFirstInsertionPt());
// Calls must always have a debug location, or else inlining breaks. So
// we explicitly set a artificial debug location here.
if (DISubprogram *SP = F.getSubprogram())
diff --git a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
index 232e5e2bb886d..7497bf30b5332 100644
--- a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
+++ b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
@@ -351,7 +351,7 @@ bool ShadowStackGCLoweringImpl::runOnFunction(Function &F,
// Build the shadow stack entry at the very start of the function.
BasicBlock::iterator IP = F.getEntryBlock().begin();
- IRBuilder<> AtEntry(IP->getParent(), IP);
+ IRBuilder<> AtEntry(IP);
Instruction *StackEntry =
AtEntry.CreateAlloca(ConcreteStackEntryTy, nullptr, "gc_frame");
@@ -384,7 +384,7 @@ bool ShadowStackGCLoweringImpl::runOnFunction(Function &F,
// shadow stack.
while (isa<StoreInst>(IP))
++IP;
- AtEntry.SetInsertPoint(IP->getParent(), IP);
+ AtEntry.SetInsertPoint(IP);
// Push the entry onto the shadow stack.
Instruction *EntryNextPtr = CreateGEP(Context, AtEntry, ConcreteStackEntryTy,
diff --git a/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 20c827ce08d84..330d94b7a6689 100644
--- a/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -182,7 +182,7 @@ void SjLjEHPrepareImpl::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
Type *LPadType = LPI->getType();
Value *LPadVal = PoisonValue::get(LPadType);
auto *SelI = cast<Instruction>(SelVal);
- IRBuilder<> Builder(SelI->getParent(), std::next(SelI->getIterator()));
+ IRBuilder<> Builder(std::next(SelI->getIterator()));
LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val");
@@ -206,8 +206,7 @@ SjLjEHPrepareImpl::setupFunctionContext(Function &F,
// Fill in the function context structure.
for (LandingPadInst *LPI : LPads) {
- IRBuilder<> Builder(LPI->getParent(),
- LPI->getParent()->getFirstInsertionPt());
+ IRBuilder<> Builder(LPI->getParent()->getFirstInsertionPt());
// Reference the __data field.
Value *FCData =
diff --git a/llvm/lib/CodeGen/WasmEHPrepare.cpp b/llvm/lib/CodeGen/WasmEHPrepare.cpp
index 16c1dcb1e1175..a9322dd74ce77 100644
--- a/llvm/lib/CodeGen/WasmEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WasmEHPrepare.cpp
@@ -303,7 +303,7 @@ void WasmEHPrepareImpl::prepareEHPad(BasicBlock *BB, bool NeedPersonality,
unsigned Index) {
assert(BB->isEHPad() && "BB is not an EHPad!");
IRBuilder<> IRB(BB->getContext());
- IRB.SetInsertPoint(BB, BB->getFirstInsertionPt());
+ IRB.SetInsertPoint(BB->getFirstInsertionPt());
auto *FPI = cast<FuncletPadInst>(BB->getFirstNonPHI());
Instruction *GetExnCI = nullptr, *GetSelectorCI = nullptr;
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 9eafc9f90cc1b..4403062d27386 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -1160,7 +1160,7 @@ void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
FI.FiniCB(Builder.saveIP());
// The continuation block is where code generation continues.
- Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
+ Builder.SetInsertPoint(NonCancellationBlock->begin());
}
// Callback used to create OpenMP runtime calls to support
@@ -1194,7 +1194,7 @@ static void targetParallelCallback(
// Add alloca for kernel args
OpenMPIRBuilder ::InsertPointTy CurrentIP = Builder.saveIP();
- Builder.SetInsertPoint(OuterAllocaBB, OuterAllocaBB->getFirstInsertionPt());
+ Builder.SetInsertPoint(OuterAllocaBB->getFirstInsertionPt());
AllocaInst *ArgsAlloca =
Builder.CreateAlloca(ArrayType::get(PtrTy, NumCapturedVars));
Value *Args = ArgsAlloca;
@@ -1569,8 +1569,7 @@ IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
// Store to stack at end of the block that currently branches to the entry
// block of the to-be-outlined region.
- Builder.SetInsertPoint(InsertBB,
- InsertBB->getTerminator()->getIterator());
+ Builder.SetInsertPoint(InsertBB->getTerminator()->getIterator());
Builder.CreateStore(&V, Ptr);
// Load back next to allocations in the to-be-outlined region.
@@ -1938,7 +1937,7 @@ OpenMPIRBuilder::createTask(const LocationDescription &Loc,
StaleCI->eraseFromParent();
- Builder.SetInsertPoint(TaskAllocaBB, TaskAllocaBB->begin());
+ Builder.SetInsertPoint(TaskAllocaBB->begin());
if (HasShareds) {
LoadInst *Shareds = Builder.CreateLoad(VoidPtr, OutlinedFn.getArg(1));
OutlinedFn.getArg(1)->replaceUsesWithIf(
@@ -1952,7 +1951,7 @@ OpenMPIRBuilder::createTask(const LocationDescription &Loc,
};
addOutlineInfo(std::move(OI));
- Builder.SetInsertPoint(TaskExitBB, TaskExitBB->begin());
+ Builder.SetInsertPoint(TaskExitBB->begin());
return Builder.saveIP();
}
@@ -2160,7 +2159,7 @@ OpenMPIRBuilder::createReductions(const LocationDescription &Loc,
Builder.SetInsertPoint(AllocaIP.getBlock()->getTerminator());
Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");
- Builder.SetInsertPoint(InsertBlock, InsertBlock->end());
+ Builder.SetInsertPoint(InsertBlock->end());
for (auto En : enumerate(ReductionInfos)) {
unsigned Index = En.index();
@@ -2599,15 +2598,13 @@ OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
// the latch block.
CLI->mapIndVar([&](Instruction *OldIV) -> Value * {
- Builder.SetInsertPoint(CLI->getBody(),
- CLI->getBody()->getFirstInsertionPt());
+ Builder.SetInsertPoint(CLI->getBody()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(DL);
return Builder.CreateAdd(OldIV, LowerBound);
});
// In the "exit" block, call the "fini" function.
- Builder.SetInsertPoint(CLI->getExit(),
- CLI->getExit()->getTerminator()->getIterator());
+ Builder.SetInsertPoint(CLI->getExit()->getTerminator()->getIterator());
Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
// Add the barrier if requested.
@@ -2748,7 +2745,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(
});
// In the "exit" block, call the "fini" function.
- Builder.SetInsertPoint(DispatchExit, DispatchExit->getFirstInsertionPt());
+ Builder.SetInsertPoint(DispatchExit->getFirstInsertionPt());
Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
// Add the barrier if requested.
@@ -3167,7 +3164,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
PreHeader->getParent());
// This needs to be 32-bit always, so can't use the IVTy Zero above.
- Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
+ Builder.SetInsertPoint(OuterCond->getFirstInsertionPt());
Value *Res =
Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
PLowerBound, PUpperBound, PStride});
@@ -3192,7 +3189,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
// Modify the inner condition:
// * Use the UpperBound returned from the DynamicNext call.
// * jump to the loop outer loop when done with one of the inner loops.
- Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
+ Builder.SetInsertPoint(Cond->getFirstInsertionPt());
UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
Instruction *Comp = &*Builder.GetInsertPoint();
auto *CI = cast<CmpInst>(Comp);
@@ -6340,7 +6337,7 @@ OpenMPIRBuilder::createTeams(const LocationDescription &Loc,
BasicBlock &OuterAllocaBB = CurrentFunction->getEntryBlock();
if (&OuterAllocaBB == Builder.GetInsertBlock()) {
BasicBlock *BodyBB = splitBB(Builder, /*CreateBranch=*/true, "teams.entry");
- Builder.SetInsertPoint(BodyBB, BodyBB->begin());
+ Builder.SetInsertPoint(BodyBB->begin());
}
// The current basic block is split into four basic blocks. After outlining,
@@ -6464,7 +6461,7 @@ OpenMPIRBuilder::createTeams(const LocationDescription &Loc,
addOutlineInfo(std::move(OI));
- Builder.SetInsertPoint(ExitBB, ExitBB->begin());
+ Builder.SetInsertPoint(ExitBB->begin());
return Builder.saveIP();
}
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 2f4b8351e747a..cb692f2ffdbd9 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -2432,7 +2432,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
LLVMContext &C = CI->getContext();
IRBuilder<> Builder(C);
- Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
+ Builder.SetInsertPoint(CI->getIterator());
if (!NewFn) {
bool FallthroughToDefaultUpgrade = false;
@@ -4970,7 +4970,7 @@ void llvm::UpgradeARCRuntime(Module &M) {
if (!CI || CI->getCalledFunction() != Fn)
continue;
- IRBuilder<> Builder(CI->getParent(), CI->getIterator());
+ IRBuilder<> Builder(CI->getIterator());
FunctionType *NewFuncTy = NewFn->getFunctionType();
SmallVector<Value *, 2> Args;
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index 3b6b01fb78b0a..6b265bfe84d5b 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -3136,7 +3136,7 @@ static void LLVMPositionBuilderImpl(IRBuilder<> *Builder, BasicBlock *Block,
Instruction *Instr, bool BeforeDbgRecords) {
BasicBlock::iterator I = Instr ? Instr->getIterator() : Block->end();
I.setHeadBit(BeforeDbgRecords);
- Builder->SetInsertPoint(Block, I);
+ Builder->SetInsertPoint(I);
}
void LLVMPositionBuilder(LLVMBuilderRef Builder, LLVMBasicBlockRef Block,
diff --git a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
index fe68203ad5392..00881720c34db 100644
--- a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
+++ b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
@@ -158,7 +158,7 @@ bool SVEIntrinsicOpts::coalescePTrueIntrinsicCalls(
LLVMContext &Ctx = BB.getContext();
IRBuilder<> Builder(Ctx);
- Builder.SetInsertPoint(&BB, ++MostEncompassingPTrue->getIterator());
+ Builder.SetInsertPoint(++MostEncompassingPTrue->getIterator());
auto *MostEncompassingPTrueVTy =
cast<VectorType>(MostEncompassingPTrue->getType());
@@ -175,7 +175,7 @@ bool SVEIntrinsicOpts::coalescePTrueIntrinsicCalls(
if (MostEncompassingPTrueVTy != PTrueVTy) {
ConvertFromCreated = true;
- Builder.SetInsertPoint(&BB, ++ConvertToSVBool->getIterator());
+ Builder.SetInsertPoint(++ConvertToSVBool->getIterator());
auto *ConvertFromSVBool =
Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
{PTrueVTy}, {ConvertToSVBool});
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index 38cc5a9bef969..8b5e385a6b153 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -994,7 +994,7 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
if (IsPixelShader) {
// Need a final PHI to reconverge to above the helper lane branch mask.
- B.SetInsertPoint(PixelExitBB, PixelExitBB->getFirstNonPHIIt());
+ B.SetInsertPoint(PixelExitBB->getFirstNonPHIIt());
PHINode *const PHI = B.CreatePHI(Ty, 2);
PHI->addIncoming(PoisonValue::get(Ty), PixelEntryBB);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 6e7d34f5adaa3..b067b8b930288 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1118,7 +1118,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
if (ReqdAccuracy < 1.0f)
return false;
- IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
+ IRBuilder<> Builder(std::next(FDiv.getIterator()));
Builder.setFastMathFlags(DivFMF);
Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 456f3cb332cf8..9af645d1cf585 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -1328,7 +1328,7 @@ AMDGPULibCalls::insertSinCos(Value *Arg, FastMathFlags FMF, IRBuilder<> &B,
// sincos call there. Otherwise, right after the allocas works well enough
// if it's an argument or constant.
- B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
+ B.SetInsertPoint(++ArgInst->getIterator());
// SetInsertPoint unwelcomely always tries to set the debug loc.
B.SetCurrentDebugLocation(DL);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
index 1873fdb4d2596..dce0e5f46ba46 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -106,7 +106,7 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
LLVMContext &Ctx = F.getParent()->getContext();
const DataLayout &DL = F.getParent()->getDataLayout();
BasicBlock &EntryBlock = *F.begin();
- IRBuilder<> Builder(&EntryBlock, getInsertPt(EntryBlock));
+ IRBuilder<> Builder(getInsertPt(EntryBlock));
const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index 2bdbf4151dd95..58ba577956dbb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -283,7 +283,7 @@ class AMDGPULowerModuleLDS {
// codegen would suffice for that, but one would still need to ensure that
// the variables are allocated in the anticpated order.
BasicBlock *Entry = &Func->getEntryBlock();
- IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
+ IRBuilder<> Builder(Entry->getFirstNonPHIIt());
Function *Decl =
Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});
diff --git a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index 08e1d6b87b0df..f629b6474898c 100644
--- a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -336,7 +336,7 @@ bool SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
// Split edge to make Def dominate Use
FirstInsertionPt = SplitEdge(DefBB, BB, DT, LI)->getFirstInsertionPt();
}
- IRBuilder<> IRB(FirstInsertionPt->getParent(), FirstInsertionPt);
+ IRBuilder<> IRB(FirstInsertionPt);
// TODO: StructurizeCFG 'Flow' blocks have debug locations from the
// condition, for now just avoid copying these DebugLocs so that stepping
// out of the then/else block in a debugger doesn't step to the condition.
diff --git a/llvm/lib/Target/ARM/ARMParallelDSP.cpp b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
index 8c1b3325f155f..2925bde56dfd1 100644
--- a/llvm/lib/Target/ARM/ARMParallelDSP.cpp
+++ b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -637,8 +637,7 @@ void ARMParallelDSP::InsertParallelMACs(Reduction &R) {
Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);
- IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
- BasicBlock::iterator(InsertAfter));
+ IRBuilder<NoFolder> Builder((BasicBlock::iterator(InsertAfter)));
Instruction *Call = Builder.CreateCall(SMLAD, Args);
NumSMLAD++;
return Call;
@@ -758,8 +757,7 @@ LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
// Insert the load at the point of the original dominating load.
LoadInst *DomLoad = DT->dominates(Base, Offset) ? Base : Offset;
- IRBuilder<NoFolder> IRB(DomLoad->getParent(),
- ++BasicBlock::iterator(DomLoad));
+ IRBuilder<NoFolder> IRB(++BasicBlock::iterator(DomLoad));
// Create the wide load, while making sure to maintain the original alignment
// as this prevents ldrd from being generated when it could be illegal due to
diff --git a/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp b/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
index 5ac79cbfe6d98..0352b9d3fb2e3 100644
--- a/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
+++ b/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
@@ -382,7 +382,7 @@ static bool tryInterleave(Instruction *Start,
for (Instruction *I : Truncs) {
LLVM_DEBUG(dbgs() << "Replacing trunc " << *I << "\n");
- Builder.SetInsertPoint(I->getParent(), ++I->getIterator());
+ Builder.SetInsertPoint(++I->getIterator());
Value *Shuf = Builder.CreateShuffleVector(I, TruncMask);
I->replaceAllUsesWith(Shuf);
cast<Instruction>(Shuf)->setOperand(0, I);
diff --git a/llvm/lib/Target/ARM/MVETailPredication.cpp b/llvm/lib/Target/ARM/MVETailPredication.cpp
index fe97d4f758997..11c85bd07ba48 100644
--- a/llvm/lib/Target/ARM/MVETailPredication.cpp
+++ b/llvm/lib/Target/ARM/MVETailPredication.cpp
@@ -381,7 +381,7 @@ void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
cast<FixedVectorType>(ActiveLaneMask->getType())->getNumElements();
// Insert a phi to count the number of elements processed by the loop.
- Builder.SetInsertPoint(L->getHeader(), L->getHeader()->getFirstNonPHIIt());
+ Builder.SetInsertPoint(L->getHeader()->getFirstNonPHIIt());
PHINode *Processed = Builder.CreatePHI(Ty, 2);
Processed->addIncoming(Start, L->getLoopPreheader());
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index 7777ae23e8aec..50a997083f33d 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -1115,7 +1115,7 @@ bool PolynomialMultiplyRecognize::promoteTypes(BasicBlock *LoopB,
assert(Ty0 == DestTy);
// In order to create the trunc, P must have the promoted type.
P->mutateType(Ty0);
- Value *T = IRBuilder<>(ExitB, End).CreateTrunc(P, PTy);
+ Value *T = IRBuilder<>(End).CreateTrunc(P, PTy);
// In order for the RAUW to work, the types of P and T must match.
P->mutateType(PTy);
P->replaceAllUsesWith(T);
@@ -1462,7 +1462,7 @@ bool PolynomialMultiplyRecognize::convertShiftsToLeft(BasicBlock *LoopB,
// them right after the loop exit.
// Take advantage of the loop-closed SSA form, which has all the post-
// loop values in phi nodes.
- IRB.SetInsertPoint(ExitB, ExitB->getFirstInsertionPt());
+ IRB.SetInsertPoint(ExitB->getFirstInsertionPt());
for (auto P = ExitB->begin(), Q = ExitB->end(); P != Q; ++P) {
if (!isa<PHINode>(P))
break;
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index 797b798520aa1..838b78fb4e2e3 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -1460,8 +1460,7 @@ auto AlignVectors::realignGroup(const MoveGroup &Move) const -> bool {
InsertAt = &*std::next(InsertAt->getIterator());
}
- IRBuilder Builder(InsertAt->getParent(), InsertAt->getIterator(),
- InstSimplifyFolder(HVC.DL));
+ IRBuilder Builder(InsertAt->getIterator(), InstSimplifyFolder(HVC.DL));
Value *AlignAddr = nullptr; // Actual aligned address.
Value *AlignVal = nullptr; // Right-shift amount (for valign).
@@ -1741,8 +1740,7 @@ auto HvxIdioms::processFxpMul(Instruction &In, const FxpOp &Op) const
// TODO: Add multiplication of vectors by scalar registers (up to 4 bytes).
Value *X = Op.X.Val, *Y = Op.Y.Val;
- IRBuilder Builder(In.getParent(), In.getIterator(),
- InstSimplifyFolder(HVC.DL));
+ IRBuilder Builder(In.getIterator(), InstSimplifyFolder(HVC.DL));
auto roundUpWidth = [](unsigned Width) -> unsigned {
if (Width <= 32 && !isPowerOf2_32(Width)) {
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
index f4f966e772b51..1ff31628195e0 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
@@ -552,7 +552,7 @@ void HexagonVectorLoopCarriedReuse::reuseValue() {
}
BasicBlock *BB = BEInst->getParent();
IRBuilder<> IRB(BB);
- IRB.SetInsertPoint(BB, BB->getFirstNonPHIIt());
+ IRB.SetInsertPoint(BB->getFirstNonPHIIt());
Value *BEVal = BEInst;
PHINode *NewPhi;
for (int i = Iterations-1; i >=0 ; --i) {
diff --git a/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp b/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp
index 4a3b64f30d8d4..31bed914a487d 100644
--- a/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp
@@ -111,7 +111,7 @@ class PPCBoolRetToInt : public FunctionPass {
if (auto *I = dyn_cast<Instruction>(V))
IRB.SetInsertPoint(I->getNextNode());
else
- IRB.SetInsertPoint(&Func->getEntryBlock(), Func->getEntryBlock().begin());
+ IRB.SetInsertPoint(Func->getEntryBlock().begin());
return IRB.CreateZExt(V, IntTy);
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 5c10e04325d51..d19788b30043d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -1405,7 +1405,7 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
AggrStores.insert(&I);
}
- B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
+ B.SetInsertPoint(Func.getEntryBlock().begin());
for (auto &GV : Func.getParent()->globals())
processGlobalValue(GV, B);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index 027ee1086bf4e..edac6cdc9a293 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -1305,7 +1305,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
// Add a phi to the tail, which will be the output of setjmp, which
// indicates if this is the first call or a longjmp back. The phi directly
// uses the right value based on where we arrive from
- IRB.SetInsertPoint(Tail, Tail->getFirstNonPHIIt());
+ IRB.SetInsertPoint(Tail->getFirstNonPHIIt());
PHINode *SetjmpRet = IRB.CreatePHI(IRB.getInt32Ty(), 2, "setjmp.ret");
// setjmp initial call returns 0
diff --git a/llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp b/llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp
index e355a4b9d35b9..55ddd59b56506 100644
--- a/llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp
+++ b/llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp
@@ -492,7 +492,7 @@ X86LowerAMXIntrinsics::lowerTileDP(Instruction *TileDP) {
KDWord, C, A, B);
// we cannot assume there always be bitcast after tiledpbssd. So we need to
// insert one bitcast as required
- Builder.SetInsertPoint(End, End->getFirstNonPHIIt());
+ Builder.SetInsertPoint(End->getFirstNonPHIIt());
Value *ResAMX =
Builder.CreateBitCast(ResVec, Type::getX86_AMXTy(Builder.getContext()));
// Delete TileDP intrinsic and do some clean-up.
@@ -536,7 +536,7 @@ bool X86LowerAMXIntrinsics::lowerTileLoadStore(Instruction *TileLoadStore) {
if (IsTileLoad) {
// we cannot assume there always be bitcast after tileload. So we need to
// insert one bitcast as required
- Builder.SetInsertPoint(End, End->getFirstNonPHIIt());
+ Builder.SetInsertPoint(End->getFirstNonPHIIt());
Value *ResAMX =
Builder.CreateBitCast(ResVec, Type::getX86_AMXTy(Builder.getContext()));
// Delete tileloadd6 intrinsic and do some clean-up
diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp
index 079ac983a8a01..a827d5e0012a0 100644
--- a/llvm/lib/Target/X86/X86LowerAMXType.cpp
+++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp
@@ -476,9 +476,8 @@ static Instruction *createTileStore(Instruction *TileDef, Value *Ptr) {
Value *Row = II->getOperand(0);
Value *Col = II->getOperand(1);
- BasicBlock *BB = TileDef->getParent();
BasicBlock::iterator Iter = TileDef->getIterator();
- IRBuilder<> Builder(BB, ++Iter);
+ IRBuilder<> Builder(++Iter);
Value *Stride = Builder.getInt64(64);
std::array<Value *, 5> Args = {Row, Col, Ptr, Stride, TileDef};
diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp
index 578d653c1e0ad..98d7fb9d8152e 100644
--- a/llvm/lib/Target/X86/X86WinEHState.cpp
+++ b/llvm/lib/Target/X86/X86WinEHState.cpp
@@ -274,7 +274,7 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
// Struct type of RegNode. Used for GEPing.
Type *RegNodeTy;
- IRBuilder<> Builder(&F->getEntryBlock(), F->getEntryBlock().begin());
+ IRBuilder<> Builder(F->getEntryBlock().begin());
Type *Int8PtrType = Builder.getPtrTy();
Type *Int32Ty = Builder.getInt32Ty();
Type *VoidTy = Builder.getVoidTy();
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index c7e84a009221f..c88907f2a687e 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -139,7 +139,7 @@ static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
if (Pred != CmpInst::ICMP_EQ)
return false;
- IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
+ IRBuilder<> Builder(PhiBB->getFirstInsertionPt());
if (ShVal0 == ShVal1)
++NumGuardedRotates;
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 8e829a53aeca2..31b6246e3bd48 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -1857,7 +1857,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
}
auto Index = FrameData.getFieldIndex(Def);
- Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
+ Builder.SetInsertPoint(InsertPt);
auto *G = Builder.CreateConstInBoundsGEP2_32(
FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
if (ByValTy) {
@@ -1877,8 +1877,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
// reference provided with the frame GEP.
if (CurrentBlock != U->getParent()) {
CurrentBlock = U->getParent();
- Builder.SetInsertPoint(CurrentBlock,
- CurrentBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(CurrentBlock->getFirstInsertionPt());
auto *GEP = GetFramePointer(E.first);
GEP->setName(E.first->getName() + Twine(".reload.addr"));
@@ -1969,7 +1968,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
Shape.ABI == coro::ABI::Async) {
// If we found any allocas, replace all of their remaining uses with Geps.
- Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
+ Builder.SetInsertPoint(SpillBlock->begin());
for (const auto &P : FrameData.Allocas) {
AllocaInst *Alloca = P.Alloca;
auto *G = GetFramePointer(Alloca);
@@ -1988,8 +1987,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
// dbg.declares and dbg.values with the reload from the frame.
// Note: We cannot replace the alloca with GEP instructions indiscriminately,
// as some of the uses may not be dominated by CoroBegin.
- Builder.SetInsertPoint(Shape.AllocaSpillBlock,
- Shape.AllocaSpillBlock->begin());
+ Builder.SetInsertPoint(Shape.AllocaSpillBlock->begin());
SmallVector<Instruction *, 4> UsersToUpdate;
for (const auto &A : FrameData.Allocas) {
AllocaInst *Alloca = A.Alloca;
@@ -2874,7 +2872,7 @@ salvageDebugInfoImpl(SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
while (isa<IntrinsicInst>(InsertPt))
++InsertPt;
- Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
+ Builder.SetInsertPoint(InsertPt);
while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index c4b9375a53a27..a026a4ed1911f 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -7469,7 +7469,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
unsigned ArgNo, BasicBlock::iterator IP) {
assert(PrivType && "Expected privatizable type!");
- IRBuilder<NoFolder> IRB(IP->getParent(), IP);
+ IRBuilder<NoFolder> IRB(IP);
const DataLayout &DL = F.getParent()->getDataLayout();
// Traverse the type, build GEPs and stores.
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index e3a4821b8226b..9e92438edc9d0 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -1722,8 +1722,7 @@ struct OpenMPOpt {
auto &IRBuilder = OMPInfoCache.OMPBuilder;
Function *F = RuntimeCall.getCaller();
BasicBlock &Entry = F->getEntryBlock();
- IRBuilder.Builder.SetInsertPoint(&Entry,
- Entry.getFirstNonPHIOrDbgOrAlloca());
+ IRBuilder.Builder.SetInsertPoint(Entry.getFirstNonPHIOrDbgOrAlloca());
Value *Handle = IRBuilder.Builder.CreateAlloca(
IRBuilder.AsyncInfo, /*ArraySize=*/nullptr, "handle");
Handle =
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index d767fa3930e2f..c31ed4df4878f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -4129,7 +4129,7 @@ Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
// users are freely-invertible, so that 'not' *will* get folded away.
BuilderTy::InsertPointGuard Guard(Builder);
// Set insertion point to right after the Y.
- Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
+ Builder.SetInsertPoint(++(Y->getIterator()));
Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
// Replace all uses of Y (excluding the one in NotY!) with NotY.
Worklist.pushUsersToWorkList(*Y);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 522c31f9e3e71..9d04fd70d8a0a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -514,7 +514,7 @@ static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
bool Before = true) {
if (auto *PHI = dyn_cast<PHINode>(V)) {
BasicBlock *Parent = PHI->getParent();
- Builder.SetInsertPoint(Parent, Parent->getFirstInsertionPt());
+ Builder.SetInsertPoint(Parent->getFirstInsertionPt());
return;
}
if (auto *I = dyn_cast<Instruction>(V)) {
@@ -526,7 +526,7 @@ static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
if (auto *A = dyn_cast<Argument>(V)) {
// Set the insertion point in the entry block.
BasicBlock &Entry = A->getParent()->getEntryBlock();
- Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
+ Builder.SetInsertPoint(Entry.getFirstInsertionPt());
return;
}
// Otherwise, this is a constant and we don't need to set a new
diff --git a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 86411320ab248..19464d0d0ef83 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -1371,7 +1371,7 @@ static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
// sinking.
auto InsertPt = BB->getFirstInsertionPt();
if (InsertPt != BB->end()) {
- Self.Builder.SetInsertPoint(&*BB, InsertPt);
+ Self.Builder.SetInsertPoint(InsertPt);
return Self.Builder.CreateNot(Cond);
}
@@ -1417,7 +1417,7 @@ static Value *foldDependentIVs(PHINode &PN, IRBuilderBase &Builder) {
if (Iv2Start != Identity)
return nullptr;
- Builder.SetInsertPoint(&*BB, BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB->getFirstInsertionPt());
if (!BO) {
auto *GEP = cast<GEPOperator>(IvNext);
return Builder.CreateGEP(GEP->getSourceElementType(), Start, Iv2, "",
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index fbac209c3da6f..374856bc2734f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2678,7 +2678,7 @@ static Instruction *foldSelectToPhiImpl(SelectInst &Sel, BasicBlock *BB,
return nullptr;
}
- Builder.SetInsertPoint(BB, BB->begin());
+ Builder.SetInsertPoint(BB->begin());
auto *PN = Builder.CreatePHI(Sel.getType(), Inputs.size());
for (auto *Pred : predecessors(BB))
PN->addIncoming(Inputs[Pred], Pred);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index ebc2930d33d26..9639e3023e862 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -1123,7 +1123,7 @@ Instruction *InstCombinerImpl::foldAggregateConstructionIntoAggregateReuse(
// Note that the same block can be a predecessor more than once,
// and we need to preserve that invariant for the PHI node.
BuilderTy::InsertPointGuard Guard(Builder);
- Builder.SetInsertPoint(UseBB, UseBB->getFirstNonPHIIt());
+ Builder.SetInsertPoint(UseBB->getFirstNonPHIIt());
auto *PHI =
Builder.CreatePHI(AggTy, Preds.size(), OrigIVI.getName() + ".merged");
for (BasicBlock *Pred : Preds)
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 18b98e9b8a67e..a45545c22ee55 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1956,8 +1956,7 @@ void AddressSanitizer::instrumentUnusualSizeOrAlignment(
void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
GlobalValue *ModuleName) {
// Set up the arguments to our poison/unpoison functions.
- IRBuilder<> IRB(&GlobalInit.front(),
- GlobalInit.front().getFirstInsertionPt());
+ IRBuilder<> IRB(GlobalInit.front().getFirstInsertionPt());
// Add a call to poison all external globals before the given function starts.
Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
@@ -2869,7 +2868,7 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
if (F.getName().contains(" load]")) {
FunctionCallee AsanInitFunction =
declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
- IRBuilder<> IRB(&F.front(), F.front().begin());
+ IRBuilder<> IRB(F.front().begin());
IRB.CreateCall(AsanInitFunction, {});
return true;
}
diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index cfa8ae26c6257..219474a2bd049 100644
--- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -155,7 +155,7 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
SmallVector<std::pair<Instruction *, Value *>, 4> TrapInfo;
for (Instruction &I : instructions(F)) {
Value *Or = nullptr;
- BuilderTy IRB(I.getParent(), BasicBlock::iterator(&I), TargetFolder(DL));
+ BuilderTy IRB(I.getIterator(), TargetFolder(DL));
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (!LI->isVolatile())
Or = getBoundsCheckCond(LI->getPointerOperand(), LI, DL, TLI,
@@ -215,7 +215,7 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
// Add the checks.
for (const auto &Entry : TrapInfo) {
Instruction *Inst = Entry.first;
- BuilderTy IRB(Inst->getParent(), BasicBlock::iterator(Inst), TargetFolder(DL));
+ BuilderTy IRB(Inst->getIterator(), TargetFolder(DL));
insertBoundsCheck(Entry.second, IRB, GetTrapBB);
}
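The hunks above show the recurring shape of this change: a builder that used to take a (BasicBlock *, BasicBlock::iterator) pair now takes the iterator alone, since the iterator can recover its parent block. A minimal sketch of the new construction style, assuming this patch is applied (the helper name emitEntryCall and its Callee parameter are illustrative, not part of the patch):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/IRBuilder.h"

    // Position a builder at a function's entry from an iterator alone.
    static void emitEntryCall(llvm::Function &F, llvm::FunctionCallee Callee) {
      // No separate BasicBlock* argument: the iterator identifies the block too.
      llvm::IRBuilder<> IRB(F.getEntryBlock().getFirstInsertionPt());
      IRB.CreateCall(Callee, {});
    }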
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index f0b0917a25938..b7fe498a5fa08 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -988,7 +988,7 @@ Value *DFSanFunction::expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
if (DFS.isZeroShadow(PrimitiveShadow))
return DFS.getZeroShadow(ShadowTy);
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
SmallVector<unsigned, 4> Indices;
Value *Shadow = UndefValue::get(ShadowTy);
Shadow = expandFromPrimitiveShadowRecursive(Shadow, Indices, ShadowTy,
@@ -1039,7 +1039,7 @@ Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
if (CS && DT.dominates(CS, Pos))
return CS;
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *PrimitiveShadow = collapseToPrimitiveShadow(Shadow, IRB);
// Caches the converted primitive shadow value.
CS = PrimitiveShadow;
@@ -1772,7 +1772,7 @@ bool DataFlowSanitizer::runImpl(
Pos = DFSF.F->getEntryBlock().begin();
while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
Pos = std::next(Pos->getIterator());
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(V, Pos);
Value *Ne =
IRB.CreateICmpNE(PrimitiveShadow, DFSF.DFS.ZeroPrimitiveShadow);
@@ -1919,7 +1919,7 @@ std::pair<Value *, Value *>
DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
BasicBlock::iterator Pos) {
// Returns ((Addr & shadow_mask) + origin_base - shadow_base) & ~4UL
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *ShadowOffset = getShadowOffset(Addr, IRB);
Value *ShadowLong = ShadowOffset;
uint64_t ShadowBase = MapParams->ShadowBase;
@@ -1952,13 +1952,13 @@ DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
Value *DataFlowSanitizer::getShadowAddress(Value *Addr,
BasicBlock::iterator Pos,
Value *ShadowOffset) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
return IRB.CreateIntToPtr(ShadowOffset, PrimitiveShadowPtrTy);
}
Value *DataFlowSanitizer::getShadowAddress(Value *Addr,
BasicBlock::iterator Pos) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *ShadowOffset = getShadowOffset(Addr, IRB);
return getShadowAddress(Addr, Pos, ShadowOffset);
}
@@ -2010,7 +2010,7 @@ Value *DFSanFunction::combineShadows(Value *V1, Value *V2,
Value *PV1 = collapseToPrimitiveShadow(V1, Pos);
Value *PV2 = collapseToPrimitiveShadow(V2, Pos);
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
CCS.Block = Pos->getParent();
CCS.Shadow = IRB.CreateOr(PV1, PV2);
@@ -2074,7 +2074,7 @@ Value *DFSanFunction::combineOrigins(const std::vector<Value *> &Shadows,
}
Value *OpShadow = Shadows[I];
Value *PrimitiveShadow = collapseToPrimitiveShadow(OpShadow, Pos);
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *Cond = IRB.CreateICmpNE(PrimitiveShadow, Zero);
Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
}
@@ -2143,7 +2143,7 @@ bool DFSanFunction::useCallbackLoadLabelAndOrigin(uint64_t Size,
Value *DataFlowSanitizer::loadNextOrigin(BasicBlock::iterator Pos,
Align OriginAlign,
Value **OriginAddr) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
*OriginAddr =
IRB.CreateGEP(OriginTy, *OriginAddr, ConstantInt::get(IntptrTy, 1));
return IRB.CreateAlignedLoad(OriginTy, *OriginAddr, OriginAlign);
@@ -2175,7 +2175,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowFast(
Type *WideShadowTy =
ShadowSize == 4 ? Type::getInt32Ty(*DFS.Ctx) : Type::getInt64Ty(*DFS.Ctx);
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *CombinedWideShadow =
IRB.CreateAlignedLoad(WideShadowTy, ShadowAddr, ShadowAlign);
@@ -2244,7 +2244,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
const auto SI = AllocaShadowMap.find(AI);
if (SI != AllocaShadowMap.end()) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *ShadowLI = IRB.CreateLoad(DFS.PrimitiveShadowTy, SI->second);
const auto OI = AllocaOriginMap.find(AI);
assert(!ShouldTrackOrigins || OI != AllocaOriginMap.end());
@@ -2279,7 +2279,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
// tracking.
if (ShouldTrackOrigins &&
useCallbackLoadLabelAndOrigin(Size, InstAlignment)) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
CallInst *Call =
IRB.CreateCall(DFS.DFSanLoadLabelAndOriginFn,
{Addr, ConstantInt::get(DFS.IntptrTy, Size)});
@@ -2298,7 +2298,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
const Align OriginAlign = getOriginAlign(InstAlignment);
Value *Origin = nullptr;
if (ShouldTrackOrigins) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Origin = IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign);
}
@@ -2311,7 +2311,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
return {LI, Origin};
}
case 2: {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *ShadowAddr1 = IRB.CreateGEP(DFS.PrimitiveShadowTy, ShadowAddr,
ConstantInt::get(DFS.IntptrTy, 1));
Value *Load =
@@ -2327,7 +2327,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
return loadShadowFast(ShadowAddr, OriginAddr, Size, ShadowAlign,
OriginAlign, Origin, Pos);
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
CallInst *FallbackCall = IRB.CreateCall(
DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
FallbackCall->addRetAttr(Attribute::ZExt);
@@ -2342,7 +2342,7 @@ DFSanFunction::loadShadowOrigin(Value *Addr, uint64_t Size, Align InstAlignment,
loadShadowOriginSansLoadTracking(Addr, Size, InstAlignment, Pos);
if (DFS.shouldTrackOrigins()) {
if (ClTrackOrigins == 2) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
auto *ConstantShadow = dyn_cast<Constant>(PrimitiveShadow);
if (!ConstantShadow || !ConstantShadow->isZeroValue())
Origin = updateOriginIfTainted(PrimitiveShadow, Origin, IRB);
@@ -2445,14 +2445,14 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
}
if (ClEventCallbacks) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *Addr = LI.getPointerOperand();
CallInst *CI =
IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr});
CI->addParamAttr(0, Attribute::ZExt);
}
- IRBuilder<> IRB(AfterLi->getParent(), AfterLi);
+ IRBuilder<> IRB(AfterLi);
DFSF.addReachesFunctionCallbacksIfEnabled(IRB, LI, &LI);
}
@@ -2531,7 +2531,7 @@ void DFSanFunction::storeOrigin(BasicBlock::iterator Pos, Value *Addr,
// untainted sinks.
const Align OriginAlignment = getOriginAlign(InstAlignment);
Value *CollapsedShadow = collapseToPrimitiveShadow(Shadow, Pos);
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
if (auto *ConstantShadow = dyn_cast<Constant>(CollapsedShadow)) {
if (!ConstantShadow->isZeroValue())
paintOrigin(IRB, updateOrigin(Origin, IRB), StoreOriginAddr, Size,
@@ -2558,7 +2558,7 @@ void DFSanFunction::storeOrigin(BasicBlock::iterator Pos, Value *Addr,
void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
Align ShadowAlign,
BasicBlock::iterator Pos) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
IntegerType *ShadowTy =
IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
@@ -2578,7 +2578,7 @@ void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
const auto SI = AllocaShadowMap.find(AI);
if (SI != AllocaShadowMap.end()) {
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
IRB.CreateStore(PrimitiveShadow, SI->second);
// Do not write origins for 0 shadows because we do not trace origins for
@@ -2598,7 +2598,7 @@ void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
return;
}
- IRBuilder<> IRB(Pos->getParent(), Pos);
+ IRBuilder<> IRB(Pos);
Value *ShadowAddr, *OriginAddr;
std::tie(ShadowAddr, OriginAddr) =
DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);
diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index c7f6f2a43c17f..088c058e6b6a9 100644
--- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -918,7 +918,7 @@ bool GCOVProfiler::emitProfileNotes(
for (size_t I : llvm::seq<size_t>(0, Measured)) {
const Edge &E = *MST.allEdges()[I];
- IRBuilder<> Builder(E.Place, E.Place->getFirstInsertionPt());
+ IRBuilder<> Builder(E.Place->getFirstInsertionPt());
Value *V = Builder.CreateConstInBoundsGEP2_64(
Counters->getValueType(), Counters, 0, I);
// Disable sanitizers to decrease size bloat. We don't expect
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index a0e63bf12400e..f5b3158ee8e9f 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1622,7 +1622,7 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
A.removeAttr(llvm::Attribute::WriteOnly);
BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
- IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
+ IRBuilder<> EntryIRB(InsertPt);
emitPrologue(EntryIRB,
/*WithFrameRecord*/ ClRecordStackHistory != none &&
Mapping.WithFrameRecord &&
diff --git a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
index 8a12fa19a3ded..e5f9273c47a2a 100644
--- a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
@@ -558,7 +558,7 @@ bool MemProfiler::maybeInsertMemProfInitAtFunctionEntry(Function &F) {
if (F.getName().contains(" load]")) {
FunctionCallee MemProfInitFunction =
declareSanitizerInitFunction(*F.getParent(), MemProfInitName, {});
- IRBuilder<> IRB(&F.front(), F.front().begin());
+ IRBuilder<> IRB(F.front().begin());
IRB.CreateCall(MemProfInitFunction, {});
return true;
}
diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index ac6d3348b3db9..ed9dc238d4c3d 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -884,7 +884,7 @@ static void instrumentOneFunc(
FuncInfo.FunctionHash);
if (PGOFunctionEntryCoverage) {
auto &EntryBB = F.getEntryBlock();
- IRBuilder<> Builder(&EntryBB, EntryBB.getFirstInsertionPt());
+ IRBuilder<> Builder(EntryBB.getFirstInsertionPt());
// llvm.instrprof.cover(i8* <name>, i64 <hash>, i32 <num-counters>,
// i32 <index>)
Builder.CreateCall(
@@ -939,7 +939,7 @@ static void instrumentOneFunc(
if (PGOTemporalInstrumentation) {
NumCounters += PGOBlockCoverage ? 8 : 1;
auto &EntryBB = F.getEntryBlock();
- IRBuilder<> Builder(&EntryBB, EntryBB.getFirstInsertionPt());
+ IRBuilder<> Builder(EntryBB.getFirstInsertionPt());
// llvm.instrprof.timestamp(i8* <name>, i64 <hash>, i32 <num-counters>,
// i32 <index>)
Builder.CreateCall(
@@ -949,7 +949,7 @@ static void instrumentOneFunc(
}
for (auto *InstrBB : InstrumentBBs) {
- IRBuilder<> Builder(InstrBB, InstrBB->getFirstInsertionPt());
+ IRBuilder<> Builder(InstrBB->getFirstInsertionPt());
assert(Builder.GetInsertPoint() != InstrBB->end() &&
"Cannot get the Instrumentation point");
// llvm.instrprof.increment(i8* <name>, i64 <hash>, i32 <num-counters>,
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp
index 33870d7ea192a..b4f5e828b7ff8 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp
@@ -80,7 +80,7 @@ CallInst *BundledRetainClaimRVs::insertRVCall(BasicBlock::iterator InsertPt,
CallInst *BundledRetainClaimRVs::insertRVCallWithColors(
BasicBlock::iterator InsertPt, CallBase *AnnotatedCall,
const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
- IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
+ IRBuilder<> Builder(InsertPt);
Function *Func = *objcarc::getAttachedARCFunction(AnnotatedCall);
assert(Func && "operand isn't a Function");
Type *ParamTy = Func->getArg(0)->getType();
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index 70bfa469193bf..4ec4750832246 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -1575,7 +1575,7 @@ void ConstraintInfo::addFact(CmpInst::Predicate Pred, Value *A, Value *B,
static bool replaceSubOverflowUses(IntrinsicInst *II, Value *A, Value *B,
SmallVectorImpl<Instruction *> &ToRemove) {
bool Changed = false;
- IRBuilder<> Builder(II->getParent(), II->getIterator());
+ IRBuilder<> Builder(II->getIterator());
Value *Sub = nullptr;
for (User *U : make_early_inc_range(II->users())) {
if (match(U, m_ExtractValue<0>(m_Value()))) {
diff --git a/llvm/lib/Transforms/Scalar/GuardWidening.cpp b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
index b6498c4e36963..0c2ef230e49bf 100644
--- a/llvm/lib/Transforms/Scalar/GuardWidening.cpp
+++ b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
@@ -792,7 +792,7 @@ Value *GuardWideningImpl::hoistChecks(SmallVectorImpl<Value *> &ChecksToHoist,
Value *OldCondition,
BasicBlock::iterator InsertPt) {
assert(!ChecksToHoist.empty());
- IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
+ IRBuilder<> Builder(InsertPt);
makeAvailableAt(ChecksToHoist, InsertPt);
makeAvailableAt(OldCondition, InsertPt);
Value *Result = Builder.CreateAnd(ChecksToHoist);
diff --git a/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp b/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp
index 9a27a08c86eb4..cff473bc8209a 100644
--- a/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp
@@ -430,7 +430,7 @@ static bool splitLoopBound(Loop &L, DominatorTree &DT, LoopInfo &LI,
ExitingCond.BI->setSuccessor(1, PostLoopPreHeader);
// Update phi node in exit block of post-loop.
- Builder.SetInsertPoint(PostLoopPreHeader, PostLoopPreHeader->begin());
+ Builder.SetInsertPoint(PostLoopPreHeader->begin());
for (PHINode &PN : PostLoop->getExitBlock()->phis()) {
for (auto i : seq<int>(0, PN.getNumOperands())) {
// Check incoming block is pre-loop's exiting block.
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 3fe5478408d45..12a11cf74320a 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -2491,7 +2491,7 @@ bool LoopIdiomRecognize::recognizeShiftUntilBitTest() {
// Step 4: Rewrite the loop into a countable form, with canonical IV.
// The new canonical induction variable.
- Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->begin());
+ Builder.SetInsertPoint(LoopHeaderBB->begin());
auto *IV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
// The induction itself.
@@ -2815,11 +2815,11 @@ bool LoopIdiomRecognize::recognizeShiftUntilZero() {
// Step 3: Rewrite the loop into a countable form, with canonical IV.
// The new canonical induction variable.
- Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->begin());
+ Builder.SetInsertPoint(LoopHeaderBB->begin());
auto *CIV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
// The induction itself.
- Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->getFirstNonPHIIt());
+ Builder.SetInsertPoint(LoopHeaderBB->getFirstNonPHIIt());
auto *CIVNext =
Builder.CreateAdd(CIV, ConstantInt::get(Ty, 1), CIV->getName() + ".next",
/*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2);
diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index 027dbb9c0f71a..e25b34b4e55f4 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -782,7 +782,7 @@ bool LoopPredication::widenWidenableBranchGuardConditions(
BI->setCondition(AllChecks);
if (InsertAssumesOfPredicatedGuardsConditions) {
BasicBlock *IfTrueBB = BI->getSuccessor(0);
- Builder.SetInsertPoint(IfTrueBB, IfTrueBB->getFirstInsertionPt());
+ Builder.SetInsertPoint(IfTrueBB->getFirstInsertionPt());
// If this block has other predecessors, we might not be able to use Cond.
// In this case, create a Phi where every other input is `true` and input
// from guard block is Cond.
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index e991296bd2fb0..fb8f10f2941d9 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -1656,7 +1656,7 @@ class LowerMatrixIntrinsics {
// condition holds, they alias, otherwise they are guaranteed to not
// overlap.
Check1->getTerminator()->eraseFromParent();
- Builder.SetInsertPoint(Check1, Check1->begin());
+ Builder.SetInsertPoint(Check1->begin());
Value *LoadEnd = Builder.CreateAdd(
LoadBegin, ConstantInt::get(IntPtrTy, LoadLoc.Size.getValue()),
"load.end", true, true);
@@ -1664,7 +1664,7 @@ class LowerMatrixIntrinsics {
Fusion);
// Copy load operand to new alloca.
- Builder.SetInsertPoint(Copy, Copy->begin());
+ Builder.SetInsertPoint(Copy->begin());
auto *VT = cast<FixedVectorType>(Load->getType());
// Use an array type for the alloca, to avoid potentially huge alignment
// requirements for large vector types.
@@ -1674,7 +1674,7 @@ class LowerMatrixIntrinsics {
Builder.CreateMemCpy(Alloca, Alloca->getAlign(), Load->getPointerOperand(),
Load->getAlign(), LoadLoc.Size.getValue());
- Builder.SetInsertPoint(Fusion, Fusion->begin());
+ Builder.SetInsertPoint(Fusion->begin());
PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
PHI->addIncoming(Load->getPointerOperand(), Check0);
PHI->addIncoming(Load->getPointerOperand(), Check1);
diff --git a/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp b/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
index 3a699df1cde4d..6c4c5f649d1c4 100644
--- a/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
+++ b/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
@@ -70,7 +70,7 @@ static bool optimizeSQRT(CallInst *Call, Function *CalledFunc,
// Create phi that will merge results of either sqrt and replace all uses.
BasicBlock *JoinBB = LibCallTerm->getSuccessor(0);
JoinBB->setName(CurrBB.getName() + ".split");
- Builder.SetInsertPoint(JoinBB, JoinBB->begin());
+ Builder.SetInsertPoint(JoinBB->begin());
PHINode *Phi = Builder.CreatePHI(Ty, 2);
Call->replaceAllUsesWith(Phi);
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index e0a9cff620189..5677ecc71fd95 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1870,7 +1870,7 @@ makeStatepointExplicitImpl(CallBase *Call, /* to replace */
UnwindBlock->getUniquePredecessor() &&
"can't safely insert in this block!");
- Builder.SetInsertPoint(UnwindBlock, UnwindBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(UnwindBlock->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(II->getDebugLoc());
// Attach exceptional gc relocates to the landingpad.
@@ -1885,7 +1885,7 @@ makeStatepointExplicitImpl(CallBase *Call, /* to replace */
NormalDest->getUniquePredecessor() &&
"can't safely insert in this block!");
- Builder.SetInsertPoint(NormalDest, NormalDest->getFirstInsertionPt());
+ Builder.SetInsertPoint(NormalDest->getFirstInsertionPt());
// gc relocates will be generated later as if it were regular call
// statepoint
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 2adbdca4b5286..b90059ded1755 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2942,7 +2942,7 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
// after the load, so that variable values referring to the load are
// dominated by it.
LIIt.setHeadBit(true);
- IRB.SetInsertPoint(LI.getParent(), LIIt);
+ IRB.SetInsertPoint(LIIt);
// Create a placeholder value with the same type as LI to use as the
// basis for the new value. This allows us to replace the uses of LI with
// the computed value, and then replace the placeholder with LI, leaving
@@ -3604,8 +3604,7 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
// dominate the PHI.
IRBuilderBase::InsertPointGuard Guard(IRB);
if (isa<PHINode>(OldPtr))
- IRB.SetInsertPoint(OldPtr->getParent(),
- OldPtr->getParent()->getFirstInsertionPt());
+ IRB.SetInsertPoint(OldPtr->getParent()->getFirstInsertionPt());
else
IRB.SetInsertPoint(OldPtr);
IRB.SetCurrentDebugLocation(OldPtr->getDebugLoc());
diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index 8f820a3bba2b3..f41911737eea2 100644
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -239,7 +239,7 @@ static void scalarizeMaskedLoad(const DataLayout &DL, CallInst *CI,
IfBlock = NewIfBlock;
// Create the phi to join the new and previous value.
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock->begin());
PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
Phi->addIncoming(NewVResult, CondBlock);
Phi->addIncoming(VResult, PrevIfBlock);
@@ -366,7 +366,7 @@ static void scalarizeMaskedStore(const DataLayout &DL, CallInst *CI,
BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
NewIfBlock->setName("else");
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock->begin());
}
CI->eraseFromParent();
@@ -493,7 +493,7 @@ static void scalarizeMaskedGather(const DataLayout &DL, CallInst *CI,
IfBlock = NewIfBlock;
// Create the phi to join the new and previous value.
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock->begin());
PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
Phi->addIncoming(NewVResult, CondBlock);
Phi->addIncoming(VResult, PrevIfBlock);
@@ -615,7 +615,7 @@ static void scalarizeMaskedScatter(const DataLayout &DL, CallInst *CI,
BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
NewIfBlock->setName("else");
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock->begin());
}
CI->eraseFromParent();
@@ -733,7 +733,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
IfBlock = NewIfBlock;
// Create the phi to join the new and previous value.
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock->begin());
PHINode *ResultPhi = Builder.CreatePHI(VecType, 2, "res.phi.else");
ResultPhi->addIncoming(NewVResult, CondBlock);
ResultPhi->addIncoming(VResult, PrevIfBlock);
@@ -847,7 +847,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
BasicBlock *PrevIfBlock = IfBlock;
IfBlock = NewIfBlock;
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock->begin());
// Add a PHI for the pointer if this isn't the last iteration.
if ((Idx + 1) != VectorWidth) {
@@ -918,7 +918,7 @@ static void scalarizeMaskedVectorHistogram(const DataLayout &DL, CallInst *CI,
// Create "else" block, fill it in the next iteration
BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
NewIfBlock->setName("else");
- Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock->begin());
}
CI->eraseFromParent();
diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index 3eca9ac7c2673..fd7d8c21e3f88 100644
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -127,10 +127,10 @@ class Scatterer {
Scatterer() = default;
// Scatter V into Size components. If new instructions are needed,
- // insert them before BBI in BB. If Cache is nonnull, use it to cache
+ // insert them before BBI. If Cache is nonnull, use it to cache
// the results.
- Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
- const VectorSplit &VS, ValueVector *cachePtr = nullptr);
+ Scatterer(BasicBlock::iterator bbi, Value *v, const VectorSplit &VS,
+ ValueVector *cachePtr = nullptr);
// Return component I, creating a new Value for it if necessary.
Value *operator[](unsigned I);
@@ -139,7 +139,6 @@ class Scatterer {
unsigned size() const { return VS.NumFragments; }
private:
- BasicBlock *BB;
BasicBlock::iterator BBI;
Value *V;
VectorSplit VS;
@@ -342,9 +341,9 @@ class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
} // end anonymous namespace
-Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
- const VectorSplit &VS, ValueVector *cachePtr)
- : BB(bb), BBI(bbi), V(v), VS(VS), CachePtr(cachePtr) {
+Scatterer::Scatterer(BasicBlock::iterator bbi, Value *v, const VectorSplit &VS,
+ ValueVector *cachePtr)
+ : BBI(bbi), V(v), VS(VS), CachePtr(cachePtr) {
IsPointer = V->getType()->isPointerTy();
if (!CachePtr) {
Tmp.resize(VS.NumFragments, nullptr);
@@ -363,7 +362,7 @@ Value *Scatterer::operator[](unsigned Frag) {
// Try to reuse a previous value.
if (CV[Frag])
return CV[Frag];
- IRBuilder<> Builder(BB, BBI);
+ IRBuilder<> Builder(BBI);
if (IsPointer) {
if (Frag == 0)
CV[Frag] = V;
@@ -443,7 +442,7 @@ Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V,
// so that it can be used everywhere.
Function *F = VArg->getParent();
BasicBlock *BB = &F->getEntryBlock();
- return Scatterer(BB, BB->begin(), V, VS, &Scattered[{V, VS.SplitTy}]);
+ return Scatterer(BB->begin(), V, VS, &Scattered[{V, VS.SplitTy}]);
}
if (Instruction *VOp = dyn_cast<Instruction>(V)) {
// When scalarizing PHI nodes we might try to examine/rewrite InsertElement
@@ -453,18 +452,17 @@ Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V,
// originating from instructions in unreachable blocks as undef we do not
// need to analyse them further.
if (!DT->isReachableFromEntry(VOp->getParent()))
- return Scatterer(Point->getParent(), Point->getIterator(),
- PoisonValue::get(V->getType()), VS);
+ return Scatterer(Point->getIterator(), PoisonValue::get(V->getType()),
+ VS);
// Put the scattered form of an instruction directly after the
// instruction, skipping over PHI nodes and debug intrinsics.
- BasicBlock *BB = VOp->getParent();
return Scatterer(
- BB, skipPastPhiNodesAndDbg(std::next(BasicBlock::iterator(VOp))), V, VS,
+ skipPastPhiNodesAndDbg(std::next(BasicBlock::iterator(VOp))), V, VS,
&Scattered[{V, VS.SplitTy}]);
}
// In the fallback case, just put the scattered before Point and
// keep the result local to Point.
- return Scatterer(Point->getParent(), Point->getIterator(), V, VS);
+ return Scatterer(Point->getIterator(), V, VS);
}
// Replace Op with the gathered form of the components in CV. Defer the
@@ -1181,7 +1179,7 @@ bool ScalarizerVisitor::finish() {
BasicBlock *BB = Op->getParent();
IRBuilder<> Builder(Op);
if (isa<PHINode>(Op))
- Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB->getFirstInsertionPt());
VectorSplit VS = *getVectorSplit(Ty);
assert(VS.NumFragments == CV.size());
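The Scalarizer change above can drop the stored BasicBlock member entirely because both the IRBuilder constructor and SetInsertPoint now accept a bare iterator, including positions such as getFirstNonPHIIt(). A rough sketch of repositioning an existing builder under the same assumption (createMergePhi is an invented name; includes as in the earlier sketch):

    // Create a PHI at the top of BB, restoring the builder's position on return.
    static llvm::PHINode *createMergePhi(llvm::IRBuilder<> &Builder,
                                         llvm::BasicBlock *BB, llvm::Type *Ty,
                                         unsigned NumPreds) {
      llvm::IRBuilderBase::InsertPointGuard Guard(Builder);
      // The parent block is implied by the iterator.
      Builder.SetInsertPoint(BB->getFirstNonPHIIt());
      return Builder.CreatePHI(Ty, NumPreds);
    }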
diff --git a/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp b/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
index 6ca737df49b95..4cc16be37850c 100644
--- a/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
+++ b/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
@@ -134,7 +134,7 @@ static Value *getStrlenWithNull(IRBuilder<> &Builder, Value *Str) {
Builder.CreateCondBr(Cmp, WhileDone, While);
// Add one to the computed length.
- Builder.SetInsertPoint(WhileDone, WhileDone->begin());
+ Builder.SetInsertPoint(WhileDone->begin());
auto Begin = Builder.CreatePtrToInt(Str, Int64Ty);
auto End = Builder.CreatePtrToInt(PtrPhi, Int64Ty);
auto Len = Builder.CreateSub(End, Begin);
@@ -142,7 +142,7 @@ static Value *getStrlenWithNull(IRBuilder<> &Builder, Value *Str) {
// Final join.
BranchInst::Create(Join, WhileDone);
- Builder.SetInsertPoint(Join, Join->begin());
+ Builder.SetInsertPoint(Join->begin());
auto LenPhi = Builder.CreatePHI(Len->getType(), 2);
LenPhi->addIncoming(Len, WhileDone);
LenPhi->addIncoming(Zero, Prev);
diff --git a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
index 73a50b793e6d2..58056b9bb7809 100644
--- a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -260,7 +260,7 @@ QuotRemWithBB FastDivInsertionTask::createSlowBB(BasicBlock *SuccessorBB) {
QuotRemWithBB DivRemPair;
DivRemPair.BB = BasicBlock::Create(MainBB->getParent()->getContext(), "",
MainBB->getParent(), SuccessorBB);
- IRBuilder<> Builder(DivRemPair.BB, DivRemPair.BB->begin());
+ IRBuilder<> Builder(DivRemPair.BB->begin());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
Value *Dividend = SlowDivOrRem->getOperand(0);
@@ -284,7 +284,7 @@ QuotRemWithBB FastDivInsertionTask::createFastBB(BasicBlock *SuccessorBB) {
QuotRemWithBB DivRemPair;
DivRemPair.BB = BasicBlock::Create(MainBB->getParent()->getContext(), "",
MainBB->getParent(), SuccessorBB);
- IRBuilder<> Builder(DivRemPair.BB, DivRemPair.BB->begin());
+ IRBuilder<> Builder(DivRemPair.BB->begin());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
Value *Dividend = SlowDivOrRem->getOperand(0);
@@ -310,7 +310,7 @@ QuotRemWithBB FastDivInsertionTask::createFastBB(BasicBlock *SuccessorBB) {
QuotRemPair FastDivInsertionTask::createDivRemPhiNodes(QuotRemWithBB &LHS,
QuotRemWithBB &RHS,
BasicBlock *PhiBB) {
- IRBuilder<> Builder(PhiBB, PhiBB->begin());
+ IRBuilder<> Builder(PhiBB->begin());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
PHINode *QuoPhi = Builder.CreatePHI(getSlowType(), 2);
QuoPhi->addIncoming(LHS.Quotient, LHS.BB);
@@ -327,7 +327,7 @@ QuotRemPair FastDivInsertionTask::createDivRemPhiNodes(QuotRemWithBB &LHS,
/// doesn't need a runtime check.
Value *FastDivInsertionTask::insertOperandRuntimeCheck(Value *Op1, Value *Op2) {
assert((Op1 || Op2) && "Nothing to check");
- IRBuilder<> Builder(MainBB, MainBB->end());
+ IRBuilder<> Builder(MainBB->end());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
Value *OrV;
@@ -397,7 +397,7 @@ std::optional<QuotRemPair> FastDivInsertionTask::insertFastDivAndRem() {
isa<ConstantInt>(BCI->getOperand(0)))
return std::nullopt;
- IRBuilder<> Builder(MainBB, MainBB->end());
+ IRBuilder<> Builder(MainBB->end());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
if (DividendShort && !isSignedOp()) {
diff --git a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
index dda80d419999d..d70850125ed1a 100644
--- a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
+++ b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
@@ -113,7 +113,7 @@ static void createRetPHINode(Instruction *OrigInst, Instruction *NewInst,
if (OrigInst->getType()->isVoidTy() || OrigInst->use_empty())
return;
- Builder.SetInsertPoint(MergeBlock, MergeBlock->begin());
+ Builder.SetInsertPoint(MergeBlock->begin());
PHINode *Phi = Builder.CreatePHI(OrigInst->getType(), 0);
SmallVector<User *, 16> UsersToUpdate(OrigInst->users());
for (User *U : UsersToUpdate)
diff --git a/llvm/lib/Transforms/Utils/FlattenCFG.cpp b/llvm/lib/Transforms/Utils/FlattenCFG.cpp
index 16b4bb1981d8b..27201af694d44 100644
--- a/llvm/lib/Transforms/Utils/FlattenCFG.cpp
+++ b/llvm/lib/Transforms/Utils/FlattenCFG.cpp
@@ -487,7 +487,6 @@ bool FlattenCFGOpt::MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder) {
FirstEntryBlock->splice(FirstEntryBlock->end(), SecondEntryBlock);
BranchInst *PBI = cast<BranchInst>(FirstEntryBlock->getTerminator());
assert(PBI->getCondition() == CInst2);
- BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
Builder.SetInsertPoint(PBI);
if (InvertCond2) {
@@ -495,7 +494,7 @@ bool FlattenCFGOpt::MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder) {
}
Value *NC = Builder.CreateBinOp(CombineOp, CInst1, PBI->getCondition());
PBI->replaceUsesOfWith(PBI->getCondition(), NC);
- Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+ Builder.SetInsertPoint(SaveInsertPt);
// Remove IfTrue1
if (IfTrue1 != FirstEntryBlock) {
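The FlattenCFG hunk above also shows that a saved insertion point is now just an iterator; the block no longer needs to be remembered alongside it. A hedged sketch of that save-and-restore pattern, with an invented helper name and the assumption that the builder already has a valid insertion point:

    // Temporarily emit a call before BB's terminator, then restore the builder.
    static void emitBeforeTerminator(llvm::IRBuilder<> &Builder,
                                     llvm::BasicBlock *BB,
                                     llvm::FunctionCallee Fn) {
      llvm::BasicBlock::iterator Saved = Builder.GetInsertPoint();
      Builder.SetInsertPoint(BB->getTerminator());
      Builder.CreateCall(Fn, {});
      Builder.SetInsertPoint(Saved); // the iterator alone restores the position
    }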
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index c4baafd1fc5bb..7be242a51c969 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1534,7 +1534,7 @@ static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
Module *M, BasicBlock *InsertBlock,
InlineFunctionInfo &IFI,
Function *CalledFunc) {
- IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
+ IRBuilder<> Builder(InsertBlock->begin());
Value *Size =
Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
@@ -2530,7 +2530,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
!IFI.StaticAllocas.empty()) {
- IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
+ IRBuilder<> builder(FirstNewBlock->begin());
for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
AllocaInst *AI = IFI.StaticAllocas[ai];
// Don't mark swifterror allocas. They can't have bitcast uses.
@@ -2585,8 +2585,8 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// code with llvm.stacksave/llvm.stackrestore intrinsics.
if (InlinedFunctionInfo.ContainsDynamicAllocas) {
// Insert the llvm.stacksave.
- CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
- .CreateStackSave("savedstack");
+ CallInst *SavedPtr =
+ IRBuilder<>(FirstNewBlock->begin()).CreateStackSave("savedstack");
// Insert a call to llvm.stackrestore before any return instructions in the
// inlined function.
diff --git a/llvm/lib/Transforms/Utils/IntegerDivision.cpp b/llvm/lib/Transforms/Utils/IntegerDivision.cpp
index cea095408b0c3..efb93074364ef 100644
--- a/llvm/lib/Transforms/Utils/IntegerDivision.cpp
+++ b/llvm/lib/Transforms/Utils/IntegerDivision.cpp
@@ -316,7 +316,7 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
// ; end: ; preds = %loop-exit, %special-cases
// ; %q_5 = phi i32 [ %q_4, %loop-exit ], [ %retVal, %special-cases ]
// ; ret i32 %q_5
- Builder.SetInsertPoint(End, End->begin());
+ Builder.SetInsertPoint(End->begin());
PHINode *Q_5 = Builder.CreatePHI(DivTy, 2);
// Populate the Phis, since all values have now been created. Our Phis were:
diff --git a/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp b/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp
index cad7ff64c01fb..92713bc1f5c1f 100644
--- a/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp
+++ b/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp
@@ -150,7 +150,7 @@ void SSAUpdaterBulk::RewriteAllUses(DominatorTree *DT,
// We've computed IDF, now insert new phi-nodes there.
SmallVector<PHINode *, 4> InsertedPHIsForVar;
for (auto *FrontierBB : IDFBlocks) {
- IRBuilder<> B(FrontierBB, FrontierBB->begin());
+ IRBuilder<> B(FrontierBB->begin());
PHINode *PN = B.CreatePHI(R.Ty, 0, R.Name);
R.Defines[FrontierBB] = PN;
InsertedPHIsForVar.push_back(PN);
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index c7d758aa575e6..be5ff67789ca8 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -1069,7 +1069,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Create the PHI.
BasicBlock *Header = L->getHeader();
- Builder.SetInsertPoint(Header, Header->begin());
+ Builder.SetInsertPoint(Header->begin());
PHINode *PN =
Builder.CreatePHI(ExpandTy, pred_size(Header), Twine(IVName) + ".iv");
@@ -1521,7 +1521,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
return I->second;
SCEVInsertPointGuard Guard(Builder, this);
- Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
+ Builder.SetInsertPoint(InsertPt);
// Expand the expression into instructions.
SmallVector<Instruction *> DropPoisonGeneratingInsts;
@@ -1656,7 +1656,7 @@ void SCEVExpander::replaceCongruentIVInc(
else
IP = OrigInc->getNextNonDebugInstruction()->getIterator();
- IRBuilder<> Builder(IP->getParent(), IP);
+ IRBuilder<> Builder(IP);
Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
NewInc =
Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
@@ -1759,8 +1759,7 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
++NumElim;
Value *NewIV = OrigPhiRef;
if (OrigPhiRef->getType() != Phi->getType()) {
- IRBuilder<> Builder(L->getHeader(),
- L->getHeader()->getFirstInsertionPt());
+ IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
}
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 4e2dc7f2b2f4e..a185efe98ca81 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -4180,7 +4180,7 @@ static bool mergeConditionalStoreToAddress(
QStore->getParent(), PPHI);
BasicBlock::iterator PostBBFirst = PostBB->getFirstInsertionPt();
- IRBuilder<> QB(PostBB, PostBBFirst);
+ IRBuilder<> QB(PostBBFirst);
QB.SetCurrentDebugLocation(PostBBFirst->getStableDebugLoc());
Value *PPred = PStore->getParent() == PTB ? PCond : QB.CreateNot(PCond);
diff --git a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index 912c02c2ed3ae..612cad3c67b13 100644
--- a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -1793,8 +1793,7 @@ bool WidenIV::widenWithVariantUse(WidenIV::NarrowIVDefUse DU) {
assert(LoopExitingBlock && L->contains(LoopExitingBlock) &&
"Not a LCSSA Phi?");
WidePN->addIncoming(WideBO, LoopExitingBlock);
- Builder.SetInsertPoint(User->getParent(),
- User->getParent()->getFirstInsertionPt());
+ Builder.SetInsertPoint(User->getParent()->getFirstInsertionPt());
auto *TruncPN = Builder.CreateTrunc(WidePN, User->getType());
User->replaceAllUsesWith(TruncPN);
DeadInsts.emplace_back(User);
@@ -1857,7 +1856,7 @@ Instruction *WidenIV::widenIVUse(WidenIV::NarrowIVDefUse DU,
UsePhi->getIterator());
WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
BasicBlock *WidePhiBB = WidePhi->getParent();
- IRBuilder<> Builder(WidePhiBB, WidePhiBB->getFirstInsertionPt());
+ IRBuilder<> Builder(WidePhiBB->getFirstInsertionPt());
Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType(), "",
CanWidenByZExt, CanWidenBySExt);
UsePhi->replaceAllUsesWith(Trunc);
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 60ea200ad9ff9..22b82f33e803d 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -2829,12 +2829,12 @@ static bool insertSinCosCall(IRBuilderBase &B, Function *OrigCallee, Value *Arg,
if (Instruction *ArgInst = dyn_cast<Instruction>(Arg)) {
// If the argument is an instruction, it must dominate all uses so put our
// sincos call there.
- B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
+ B.SetInsertPoint(++ArgInst->getIterator());
} else {
// Otherwise (e.g. for a constant) the beginning of the function is as
// good a place as any.
BasicBlock &EntryBB = B.GetInsertBlock()->getParent()->getEntryBlock();
- B.SetInsertPoint(&EntryBB, EntryBB.begin());
+ B.SetInsertPoint(EntryBB.begin());
}
SinCos = B.CreateCall(Callee, Arg, "sincospi");
diff --git a/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
index 38095b1433ebe..b6afb74aa579e 100644
--- a/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
@@ -646,7 +646,7 @@ Value *LoopIdiomVectorize::expandFindMismatch(
// 3. We didn't find a mismatch in the vector loop, so we return MaxLen.
// 4. We exited the vector loop early due to a mismatch and need to return
// the index that we found.
- Builder.SetInsertPoint(EndBlock, EndBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(EndBlock->getFirstInsertionPt());
PHINode *ResPhi = Builder.CreatePHI(ResType, 4, "mismatch_result");
ResPhi->addIncoming(MaxLen, LoopIncBlock);
ResPhi->addIncoming(IndexPhi, LoopStartBlock);
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 9571cfe358bf3..a10a53f0b2ba3 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3026,8 +3026,7 @@ PHINode *InnerLoopVectorizer::createInductionResumeValue(
// Compute the end value for the additional bypass (if applicable).
if (AdditionalBypass.first) {
- B.SetInsertPoint(AdditionalBypass.first,
- AdditionalBypass.first->getFirstInsertionPt());
+ B.SetInsertPoint(AdditionalBypass.first->getFirstInsertionPt());
EndValueFromAdditionalBypass =
emitTransformedIndex(B, AdditionalBypass.second, II.getStartValue(),
Step, II.getKind(), II.getInductionBinOp());
@@ -3441,8 +3440,7 @@ void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State,
// Fix LCSSA phis not already fixed earlier. Extracts may need to be generated
// in the exit block, so update the builder.
- State.Builder.SetInsertPoint(State.CFG.ExitBB,
- State.CFG.ExitBB->getFirstNonPHIIt());
+ State.Builder.SetInsertPoint(State.CFG.ExitBB->getFirstNonPHIIt());
for (const auto &KV : Plan.getLiveOuts())
KV.second->fixPhi(Plan, State);
@@ -3485,7 +3483,7 @@ void InnerLoopVectorizer::fixFixedOrderRecurrence(VPLiveOut *LO,
PHINode *ScalarHeaderPhi = LO->getPhi();
auto *InitScalarFOR =
ScalarHeaderPhi->getIncomingValueForBlock(LoopScalarPreHeader);
- Builder.SetInsertPoint(LoopScalarPreHeader, LoopScalarPreHeader->begin());
+ Builder.SetInsertPoint(LoopScalarPreHeader->begin());
auto *ScalarPreheaderPhi =
Builder.CreatePHI(ScalarHeaderPhi->getType(), 2, "scalar.recur.init");
for (auto *BB : predecessors(LoopScalarPreHeader)) {
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index ae0819c964bef..bf11b76ec1e46 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -11376,12 +11376,11 @@ void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
LastInstIt = LastInst->getParent()->getFirstNonPHIIt();
if (IsPHI || (E->State != TreeEntry::NeedToGather &&
doesNotNeedToSchedule(E->Scalars))) {
- Builder.SetInsertPoint(LastInst->getParent(), LastInstIt);
+ Builder.SetInsertPoint(LastInstIt);
} else {
// Set the insertion point after the last instruction in the bundle. Set the
// debug location to Front.
Builder.SetInsertPoint(
- LastInst->getParent(),
LastInst->getNextNonDebugInstruction()->getIterator());
}
Builder.SetCurrentDebugLocation(Front->getDebugLoc());
@@ -12617,8 +12616,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
if (PostponedPHIs && E->VectorizedValue)
return E->VectorizedValue;
auto *PH = cast<PHINode>(VL0);
- Builder.SetInsertPoint(PH->getParent(),
- PH->getParent()->getFirstNonPHIIt());
+ Builder.SetInsertPoint(PH->getParent()->getFirstNonPHIIt());
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
if (PostponedPHIs || !E->VectorizedValue) {
PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
@@ -12626,8 +12624,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
Value *V = NewPhi;
// Adjust insertion point once all PHI's have been generated.
- Builder.SetInsertPoint(PH->getParent(),
- PH->getParent()->getFirstInsertionPt());
+ Builder.SetInsertPoint(PH->getParent()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
V = FinalShuffle(V, E, VecTy);
@@ -13499,10 +13496,9 @@ Value *BoUpSLP::vectorizeTree(
EntryToLastInstruction.clear();
if (ReductionRoot)
- Builder.SetInsertPoint(ReductionRoot->getParent(),
- ReductionRoot->getIterator());
+ Builder.SetInsertPoint(ReductionRoot->getIterator());
else
- Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
+ Builder.SetInsertPoint(F->getEntryBlock().begin());
// Postpone emission of PHIs operands to avoid cyclic dependencies issues.
(void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
@@ -13756,13 +13752,11 @@ Value *BoUpSLP::vectorizeTree(
"instructions");
if (auto *VecI = dyn_cast<Instruction>(Vec)) {
if (auto *PHI = dyn_cast<PHINode>(VecI))
- Builder.SetInsertPoint(PHI->getParent(),
- PHI->getParent()->getFirstNonPHIIt());
+ Builder.SetInsertPoint(PHI->getParent()->getFirstNonPHIIt());
else
- Builder.SetInsertPoint(VecI->getParent(),
- std::next(VecI->getIterator()));
+ Builder.SetInsertPoint(std::next(VecI->getIterator()));
} else {
- Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
+ Builder.SetInsertPoint(F->getEntryBlock().begin());
}
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
// Required to update internally referenced instructions.
@@ -13868,8 +13862,7 @@ Value *BoUpSLP::vectorizeTree(
Instruction *IncomingTerminator =
PH->getIncomingBlock(I)->getTerminator();
if (isa<CatchSwitchInst>(IncomingTerminator)) {
- Builder.SetInsertPoint(VecI->getParent(),
- std::next(VecI->getIterator()));
+ Builder.SetInsertPoint(std::next(VecI->getIterator()));
} else {
Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator());
}
@@ -13883,7 +13876,7 @@ Value *BoUpSLP::vectorizeTree(
User->replaceUsesOfWith(Scalar, NewInst);
}
} else {
- Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
+ Builder.SetInsertPoint(F->getEntryBlock().begin());
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
User->replaceUsesOfWith(Scalar, NewInst);
}
@@ -14053,8 +14046,7 @@ Value *BoUpSLP::vectorizeTree(
It != MinBWs.end() &&
ReductionBitWidth != It->second.first) {
IRBuilder<>::InsertPointGuard Guard(Builder);
- Builder.SetInsertPoint(ReductionRoot->getParent(),
- ReductionRoot->getIterator());
+ Builder.SetInsertPoint(ReductionRoot->getIterator());
Vec = Builder.CreateIntCast(
Vec,
VectorType::get(Builder.getIntNTy(ReductionBitWidth),
diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp
index e730c5b04bbb1..f4132c6a83125 100644
--- a/llvm/unittests/Analysis/MemorySSATest.cpp
+++ b/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -121,7 +121,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
BasicBlock *Merge(BasicBlock::Create(C, "", F));
B.SetInsertPoint(Entry);
B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left, Left->begin());
+ B.SetInsertPoint(Left->begin());
Argument *PointerArg = &*F->arg_begin();
B.SetInsertPoint(Left);
B.CreateBr(Merge);
@@ -132,14 +132,14 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAUpdater Updater(&MSSA);
// Add the store
- B.SetInsertPoint(Entry, Entry->begin());
+ B.SetInsertPoint(Entry->begin());
StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
EntryStore, nullptr, Entry, MemorySSA::Beginning);
Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));
// Add the load
- B.SetInsertPoint(Merge, Merge->begin());
+ B.SetInsertPoint(Merge->begin());
LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
// MemoryPHI should not already exist.
@@ -156,7 +156,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
// Create a store on the left
// Add the store
- B.SetInsertPoint(Left, Left->begin());
+ B.SetInsertPoint(Left->begin());
StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
LeftStore, nullptr, Left, MemorySSA::Beginning);
@@ -167,7 +167,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
EXPECT_NE(MP, nullptr);
// Add the second load
- B.SetInsertPoint(Merge, Merge->begin());
+ B.SetInsertPoint(Merge->begin());
LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
// Create the load memory access
@@ -181,7 +181,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
// Now create a store below the existing one in the entry
- B.SetInsertPoint(Entry, --Entry->end());
+ B.SetInsertPoint(--Entry->end());
StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
SecondEntryStore, nullptr, Entry, MemorySSA::End);
@@ -210,7 +210,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
BasicBlock *Merge(BasicBlock::Create(C, "", F));
B.SetInsertPoint(Entry);
B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left, Left->begin());
+ B.SetInsertPoint(Left->begin());
Argument *PointerArg = &*F->arg_begin();
B.SetInsertPoint(Left);
B.CreateBr(Merge);
@@ -220,7 +220,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAUpdater Updater(&MSSA);
- B.SetInsertPoint(Left, Left->begin());
+ B.SetInsertPoint(Left->begin());
// Add the store
StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
MemoryAccess *StoreAccess =
@@ -232,7 +232,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
EXPECT_NE(MP, nullptr);
// Add the load
- B.SetInsertPoint(Merge, Merge->begin());
+ B.SetInsertPoint(Merge->begin());
LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
// Create the load memory access
@@ -253,7 +253,7 @@ TEST_F(MemorySSATest, SinkLoad) {
BasicBlock *Merge(BasicBlock::Create(C, "", F));
B.SetInsertPoint(Entry);
B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left, Left->begin());
+ B.SetInsertPoint(Left->begin());
Argument *PointerArg = &*F->arg_begin();
B.SetInsertPoint(Left);
B.CreateBr(Merge);
@@ -261,10 +261,10 @@ TEST_F(MemorySSATest, SinkLoad) {
B.CreateBr(Merge);
// Load in left block
- B.SetInsertPoint(Left, Left->begin());
+ B.SetInsertPoint(Left->begin());
LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
// Store in merge block
- B.SetInsertPoint(Merge, Merge->begin());
+ B.SetInsertPoint(Merge->begin());
B.CreateStore(B.getInt8(16), PointerArg);
setupAnalyses();
diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
index 3ed3034f489ce..0adb615deee5b 100644
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -2253,7 +2253,7 @@ TEST_F(OpenMPIRBuilderTest, StaticWorkshareLoopTarget) {
BasicBlock *Preheader = CLI->getPreheader();
Value *TripCount = CLI->getTripCount();
- Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB->getFirstInsertionPt());
IRBuilder<>::InsertPoint AfterIP = OMPBuilder.applyWorkshareLoop(
DL, CLI, AllocaIP, true, OMP_SCHEDULE_Static, nullptr, false, false,
@@ -2317,7 +2317,7 @@ TEST_F(OpenMPIRBuilderTest, StaticWorkShareLoop) {
Value *IV = CLI->getIndVar();
BasicBlock *ExitBlock = CLI->getExit();
- Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB->getFirstInsertionPt());
InsertPointTy AllocaIP = Builder.saveIP();
OMPBuilder.applyWorkshareLoop(DL, CLI, AllocaIP, /*NeedsBarrier=*/true,
@@ -2507,7 +2507,7 @@ TEST_P(OpenMPIRBuilderTestWithParams, DynamicWorkShareLoop) {
Loc, LoopBodyGen, StartVal, StopVal, StepVal,
/*IsSigned=*/false, /*InclusiveStop=*/false);
- Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB->getFirstInsertionPt());
InsertPointTy AllocaIP = Builder.saveIP();
// Collect all the info from CLI, as it isn't usable after the call to
@@ -2649,7 +2649,7 @@ TEST_F(OpenMPIRBuilderTest, DynamicWorkShareLoopOrdered) {
Loc, LoopBodyGen, StartVal, StopVal, StepVal,
/*IsSigned=*/false, /*InclusiveStop=*/false);
- Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB->getFirstInsertionPt());
InsertPointTy AllocaIP = Builder.saveIP();
// Collect all the info from CLI, as it isn't usable after the call to
@@ -4850,7 +4850,7 @@ static bool findGEPZeroOne(Value *Ptr, Value *&Zero, Value *&One) {
static OpenMPIRBuilder::InsertPointTy
sumReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
Value *&Result) {
- IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
+ IRBuilder<> Builder(IP.getPoint());
Result = Builder.CreateFAdd(LHS, RHS, "red.add");
return Builder.saveIP();
}
@@ -4858,7 +4858,7 @@ sumReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
static OpenMPIRBuilder::InsertPointTy
sumAtomicReduction(OpenMPIRBuilder::InsertPointTy IP, Type *Ty, Value *LHS,
Value *RHS) {
- IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
+ IRBuilder<> Builder(IP.getPoint());
Value *Partial = Builder.CreateLoad(Ty, RHS, "red.partial");
Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, LHS, Partial, std::nullopt,
AtomicOrdering::Monotonic);
@@ -4868,7 +4868,7 @@ sumAtomicReduction(OpenMPIRBuilder::InsertPointTy IP, Type *Ty, Value *LHS,
static OpenMPIRBuilder::InsertPointTy
xorReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
Value *&Result) {
- IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
+ IRBuilder<> Builder(IP.getPoint());
Result = Builder.CreateXor(LHS, RHS, "red.xor");
return Builder.saveIP();
}
@@ -4876,7 +4876,7 @@ xorReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
static OpenMPIRBuilder::InsertPointTy
xorAtomicReduction(OpenMPIRBuilder::InsertPointTy IP, Type *Ty, Value *LHS,
Value *RHS) {
- IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
+ IRBuilder<> Builder(IP.getPoint());
Value *Partial = Builder.CreateLoad(Ty, RHS, "red.partial");
Builder.CreateAtomicRMW(AtomicRMWInst::Xor, LHS, Partial, std::nullopt,
AtomicOrdering::Monotonic);
diff --git a/llvm/unittests/IR/BasicBlockTest.cpp b/llvm/unittests/IR/BasicBlockTest.cpp
index 3756f227143a5..2e99e5362b735 100644
--- a/llvm/unittests/IR/BasicBlockTest.cpp
+++ b/llvm/unittests/IR/BasicBlockTest.cpp
@@ -202,7 +202,7 @@ TEST_F(InstrOrderInvalidationTest, InsertInvalidation) {
EXPECT_TRUE(BB->isInstrOrderValid());
// Invalidate orders.
- IRBuilder<> Builder(BB, I2->getIterator());
+ IRBuilder<> Builder(I2->getIterator());
Instruction *I1a = Builder.CreateCall(Nop);
EXPECT_FALSE(BB->isInstrOrderValid());
EXPECT_TRUE(I1->comesBefore(I1a));
diff --git a/llvm/unittests/IR/DebugInfoTest.cpp b/llvm/unittests/IR/DebugInfoTest.cpp
index cac8acbe15a79..50f2b0d22d791 100644
--- a/llvm/unittests/IR/DebugInfoTest.cpp
+++ b/llvm/unittests/IR/DebugInfoTest.cpp
@@ -698,7 +698,7 @@ TEST(IRBuilder, GetSetInsertionPointWithEmptyBasicBlock) {
SmallVector<Value *, 3> Args = {DIV, DIV, DIV};
Builder.CreateCall(DbgDeclare, Args);
auto IP = BB->getFirstInsertionPt();
- Builder.SetInsertPoint(BB.get(), IP);
+ Builder.SetInsertPoint(IP);
}
TEST(AssignmentTrackingTest, InstrMethods) {
diff --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp
index ff96df8581200..8e61cd2088511 100644
--- a/llvm/unittests/IR/IRBuilderTest.cpp
+++ b/llvm/unittests/IR/IRBuilderTest.cpp
@@ -1188,7 +1188,7 @@ TEST_F(IRBuilderTest, DebugLoc) {
EXPECT_EQ(DL1, Call1->getDebugLoc());
Call1->setDebugLoc(DL2);
- Builder.SetInsertPoint(Call1->getParent(), Call1->getIterator());
+ Builder.SetInsertPoint(Call1->getIterator());
EXPECT_EQ(DL2, Builder.getCurrentDebugLocation());
auto Call2 = Builder.CreateCall(Callee, std::nullopt);
EXPECT_EQ(DL2, Call2->getDebugLoc());
@@ -1311,10 +1311,10 @@ TEST_F(IRBuilderTest, CTAD) {
// The block BB is empty, so don't test this one.
// IRBuilder Builder5(BB->getTerminator());
// static_assert(std::is_same_v<decltype(Builder5), IRBuilder<>>);
- IRBuilder Builder6(BB, BB->end(), Folder);
+ IRBuilder Builder6(BB->end(), Folder);
static_assert(
std::is_same_v<decltype(Builder6), IRBuilder<InstSimplifyFolder>>);
- IRBuilder Builder7(BB, BB->end());
+ IRBuilder Builder7(BB->end());
static_assert(std::is_same_v<decltype(Builder7), IRBuilder<>>);
}
}
diff --git a/llvm/unittests/Transforms/Utils/SSAUpdaterBulkTest.cpp b/llvm/unittests/Transforms/Utils/SSAUpdaterBulkTest.cpp
index b75a492c58bc4..349b2d856d9af 100644
--- a/llvm/unittests/Transforms/Utils/SSAUpdaterBulkTest.cpp
+++ b/llvm/unittests/Transforms/Utils/SSAUpdaterBulkTest.cpp
@@ -62,7 +62,7 @@ TEST(SSAUpdaterBulk, SimpleMerge) {
Value *SubOp2 = B.CreateSub(FirstArg, ConstantInt::get(I32Ty, 4));
B.CreateBr(MergeBB);
- B.SetInsertPoint(MergeBB, MergeBB->begin());
+ B.SetInsertPoint(MergeBB->begin());
auto *I1 = cast<Instruction>(B.CreateAdd(AddOp1, ConstantInt::get(I32Ty, 5)));
auto *I2 = cast<Instruction>(B.CreateAdd(AddOp2, ConstantInt::get(I32Ty, 6)));
auto *I3 = cast<Instruction>(B.CreateAdd(SubOp1, SubOp2));
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index cbfc64972f38b..abc355df273e2 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -193,7 +193,7 @@ static llvm::BasicBlock *convertOmpOpRegions(
if (continuationBlockPHIs) {
llvm::IRBuilderBase::InsertPointGuard guard(builder);
continuationBlockPHIs->reserve(continuationBlockPHITypes.size());
- builder.SetInsertPoint(continuationBlock, continuationBlock->begin());
+ builder.SetInsertPoint(continuationBlock->begin());
for (llvm::Type *ty : continuationBlockPHITypes)
continuationBlockPHIs->push_back(builder.CreatePHI(ty, numYields));
}
@@ -402,8 +402,7 @@ static LogicalResult inlineConvertOmpRegions(
return failure();
if (continuationBlockArgs)
llvm::append_range(*continuationBlockArgs, phis);
- builder.SetInsertPoint(continuationBlock,
- continuationBlock->getFirstInsertionPt());
+ builder.SetInsertPoint(continuationBlock->getFirstInsertionPt());
return success();
}
diff --git a/polly/lib/CodeGen/BlockGenerators.cpp b/polly/lib/CodeGen/BlockGenerators.cpp
index da0e503b23efc..90bdc0bb18c89 100644
--- a/polly/lib/CodeGen/BlockGenerators.cpp
+++ b/polly/lib/CodeGen/BlockGenerators.cpp
@@ -630,9 +630,9 @@ void BlockGenerator::generateConditionalExecution(
// Put the client code into the conditional block and continue in the merge
// block afterwards.
- Builder.SetInsertPoint(ThenBlock, ThenBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(ThenBlock->getFirstInsertionPt());
GenThenFunc();
- Builder.SetInsertPoint(TailBlock, TailBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(TailBlock->getFirstInsertionPt());
}
static std::string getInstName(Value *Val) {
>From 75e58783d294dfbd1b009e773dd855a1d77867b1 Mon Sep 17 00:00:00 2001
From: Stephen Tozer <stephen.tozer at sony.com>
Date: Mon, 24 Jun 2024 17:07:07 +0100
Subject: [PATCH 2/2] Address review comments
---
llvm/include/llvm/IR/Instruction.h | 3 ++-
llvm/lib/IR/Core.cpp | 2 ++
llvm/lib/Target/ARM/ARMParallelDSP.cpp | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
index c315a41ebcf8b..914a00be1e873 100644
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -46,7 +46,8 @@ getDbgRecordRange(DbgMarker *);
/// Class used to generate an insert position (ultimately always a
/// BasicBlock::iterator, which it will implicitly convert to) from either:
-/// - An Instruction, inserting immediately prior.
+/// - An Instruction, inserting immediately prior. This will soon be marked as
+/// deprecated.
/// - A BasicBlock, inserting at the end.
/// - An iterator, inserting at its position.
/// - Any nullptr value, giving a blank iterator (not valid for insertion).
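For readers skimming the patch, a minimal sketch (not part of the patch; the function and variable names are placeholders) of how each of the forms listed above reaches the single SetInsertPoint overload via implicit conversion to InsertPosition:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

void positionExamples(IRBuilder<> &B, BasicBlock *BB, Instruction *I) {
  B.SetInsertPoint(BB);                        // BasicBlock*: insert at the end of BB
  B.SetInsertPoint(I);                         // Instruction*: insert immediately before I
  B.SetInsertPoint(I->getIterator());          // iterator: insert at I's position
  B.SetInsertPoint(BB->getFirstInsertionPt()); // iterator past PHIs, first valid insertion point
}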
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index 6b265bfe84d5b..d8aa9b80fb623 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -3135,6 +3135,8 @@ LLVMBuilderRef LLVMCreateBuilder(void) {
static void LLVMPositionBuilderImpl(IRBuilder<> *Builder, BasicBlock *Block,
Instruction *Instr, bool BeforeDbgRecords) {
BasicBlock::iterator I = Instr ? Instr->getIterator() : Block->end();
+ assert(I.getNodeParent() == Block &&
+ "Non-null Instr must be contained in Block!");
I.setHeadBit(BeforeDbgRecords);
Builder->SetInsertPoint(I);
}
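As an illustration of the invariant the new assert enforces when positioning through the C API (a sketch only, compiled as C++; the wrapper function is hypothetical, the LLVMPositionBuilder call is the existing llvm-c entry point):

#include "llvm-c/Core.h"

// Positions B inside BB. Instr, if non-null, must be an instruction that
// lives in BB; an instruction from another block now trips the new assert.
void positionInBlock(LLVMBuilderRef B, LLVMBasicBlockRef BB, LLVMValueRef Instr) {
  LLVMPositionBuilder(B, BB, Instr); // a null Instr positions at the end of BB
}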
diff --git a/llvm/lib/Target/ARM/ARMParallelDSP.cpp b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
index 2925bde56dfd1..7383f01197171 100644
--- a/llvm/lib/Target/ARM/ARMParallelDSP.cpp
+++ b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -637,7 +637,7 @@ void ARMParallelDSP::InsertParallelMACs(Reduction &R) {
Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);
- IRBuilder<NoFolder> Builder((BasicBlock::iterator(InsertAfter)));
+ IRBuilder<NoFolder> Builder(InsertAfter->getIterator());
Instruction *Call = Builder.CreateCall(SMLAD, Args);
NumSMLAD++;
return Call;
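Finally, the mechanical rewrite applied across the hunks above, shown as a standalone sketch (illustrative names, not taken from the patch):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

void migrate(IRBuilder<> &B, BasicBlock *Merge) {
  // Before: the parent block had to accompany the iterator.
  //   B.SetInsertPoint(Merge, Merge->begin());
  // After: the iterator alone suffices, since its parent block is now
  // recoverable even when the iterator is the sentinel end().
  B.SetInsertPoint(Merge->begin());
}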