[llvm] c07ab9e - [VPlan] Set debug location for recipes in VPBB::executeRecipes.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Sat Apr 5 08:24:00 PDT 2025
Author: Florian Hahn
Date: 2025-04-05T16:22:47+01:00
New Revision: c07ab9e2ab0f288c10af172d11a3936b89b952fb
URL: https://github.com/llvm/llvm-project/commit/c07ab9e2ab0f288c10af172d11a3936b89b952fb
DIFF: https://github.com/llvm/llvm-project/commit/c07ab9e2ab0f288c10af172d11a3936b89b952fb.diff
LOG: [VPlan] Set debug location for recipes in VPBB::executeRecipes.
Set the debug location for each recipe before executing it, instead of
setting the debug location ad hoc during each individual recipe's
execution.
This simplifies the code and ensures that all recipes respect their
debug locations. There are some minor test changes in cases where we
previously re-used an earlier-set debug location.
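A minimal standalone sketch of the centralized pattern, using stand-in
types rather than the real VPlan classes (DebugLoc is just an int here;
the actual VPTransformState and VPRecipeBase carry much more state):

#include <cstdio>
#include <memory>
#include <vector>

using DebugLoc = int; // stand-in for llvm::DebugLoc

struct TransformState {
  DebugLoc CurrentDL = 0;
  // Conceptual analogue of VPTransformState::setDebugLocFrom: all
  // instructions emitted from now on pick up this location.
  void setDebugLocFrom(DebugLoc DL) { CurrentDL = DL; }
};

struct RecipeBase {
  DebugLoc DL;
  explicit RecipeBase(DebugLoc DL) : DL(DL) {}
  virtual ~RecipeBase() = default;
  DebugLoc getDebugLoc() const { return DL; }
  // After this change, execute() no longer needs to call
  // State.setDebugLocFrom(getDebugLoc()) itself.
  virtual void execute(TransformState &State) = 0;
};

struct WidenRecipe : RecipeBase {
  using RecipeBase::RecipeBase;
  void execute(TransformState &State) override {
    // Emitted code uses State.CurrentDL, already set by the caller.
    std::printf("widen emitted at !dbg %d\n", State.CurrentDL);
  }
};

// Analogue of VPBasicBlock::executeRecipes: set each recipe's debug
// location once, centrally, before executing it.
void executeRecipes(TransformState &State,
                    std::vector<std::unique_ptr<RecipeBase>> &Recipes) {
  for (auto &Recipe : Recipes) {
    State.setDebugLocFrom(Recipe->getDebugLoc());
    Recipe->execute(State);
  }
}

int main() {
  TransformState State;
  std::vector<std::unique_ptr<RecipeBase>> Recipes;
  Recipes.push_back(std::make_unique<WidenRecipe>(22));
  Recipes.push_back(std::make_unique<WidenRecipe>(25));
  executeRecipes(State, Recipes); // each recipe runs under its own location
}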
Added:
Modified:
llvm/lib/Transforms/Vectorize/VPlan.cpp
llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
llvm/test/Transforms/LoopVectorize/debugloc.ll
llvm/test/Transforms/LoopVectorize/if-pred-non-void.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 1e2f70e5c103e..85fd34d79be42 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -552,8 +552,10 @@ void VPBasicBlock::executeRecipes(VPTransformState *State, BasicBlock *BB) {
State->CFG.PrevVPBB = this;
- for (VPRecipeBase &Recipe : Recipes)
+ for (VPRecipeBase &Recipe : Recipes) {
+ State->setDebugLocFrom(Recipe.getDebugLoc());
Recipe.execute(*State);
+ }
LLVM_DEBUG(dbgs() << "LV: filled BB:" << *BB);
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 7b5c6b6f6f76e..ea56b4fa3c833 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -327,7 +327,6 @@ VPPartialReductionRecipe::computeCost(ElementCount VF,
}
void VPPartialReductionRecipe::execute(VPTransformState &State) {
- State.setDebugLocFrom(getDebugLoc());
auto &Builder = State.Builder;
assert(getOpcode() == Instruction::Add &&
@@ -882,7 +881,6 @@ void VPInstruction::execute(VPTransformState &State) {
"Recipe not a FPMathOp but has fast-math flags?");
if (hasFastMathFlags())
State.Builder.setFastMathFlags(getFastMathFlags());
- State.setDebugLocFrom(getDebugLoc());
bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
(vputils::onlyFirstLaneUsed(this) ||
isVectorToScalar() || isSingleScalar());
@@ -1163,7 +1161,6 @@ void VPIRPhi::print(raw_ostream &O, const Twine &Indent,
void VPWidenCallRecipe::execute(VPTransformState &State) {
assert(State.VF.isVector() && "not widening");
- State.setDebugLocFrom(getDebugLoc());
FunctionType *VFTy = Variant->getFunctionType();
// Add return type if intrinsic is overloaded on it.
@@ -1232,7 +1229,6 @@ void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
assert(State.VF.isVector() && "not widening");
- State.setDebugLocFrom(getDebugLoc());
SmallVector<Type *, 2> TysForDecl;
// Add return type if intrinsic is overloaded on it.
@@ -1355,7 +1351,6 @@ void VPWidenIntrinsicRecipe::print(raw_ostream &O, const Twine &Indent,
#endif
void VPHistogramRecipe::execute(VPTransformState &State) {
- State.setDebugLocFrom(getDebugLoc());
IRBuilderBase &Builder = State.Builder;
Value *Address = State.get(getOperand(0));
@@ -1456,8 +1451,6 @@ void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent,
#endif
void VPWidenSelectRecipe::execute(VPTransformState &State) {
- State.setDebugLocFrom(getDebugLoc());
-
// The condition can be loop invariant but still defined inside the
// loop. This means that we can't just use the original 'cond' value.
// We have to take the 'vectorized' value and pick the first lane.
@@ -1569,7 +1562,6 @@ void VPRecipeWithIRFlags::printFlags(raw_ostream &O) const {
#endif
void VPWidenRecipe::execute(VPTransformState &State) {
- State.setDebugLocFrom(getDebugLoc());
auto &Builder = State.Builder;
switch (Opcode) {
case Instruction::Call:
@@ -1750,7 +1742,6 @@ void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
#endif
void VPWidenCastRecipe::execute(VPTransformState &State) {
- State.setDebugLocFrom(getDebugLoc());
auto &Builder = State.Builder;
/// Vectorize casts.
assert(State.VF.isVector() && "Not vectorizing?");
@@ -2029,7 +2020,6 @@ void VPDerivedIVRecipe::print(raw_ostream &O, const Twine &Indent,
#endif
void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
- State.setDebugLocFrom(getDebugLoc());
// Fast-math-flags propagate from the original induction instruction.
IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
if (hasFastMathFlags())
@@ -2223,7 +2213,6 @@ static Type *getGEPIndexTy(bool IsScalable, bool IsReverse,
void VPVectorEndPointerRecipe::execute(VPTransformState &State) {
auto &Builder = State.Builder;
- State.setDebugLocFrom(getDebugLoc());
unsigned CurrentPart = getUnrollPart(*this);
Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ true,
CurrentPart, Builder);
@@ -2259,7 +2248,6 @@ void VPVectorEndPointerRecipe::print(raw_ostream &O, const Twine &Indent,
void VPVectorPointerRecipe::execute(VPTransformState &State) {
auto &Builder = State.Builder;
- State.setDebugLocFrom(getDebugLoc());
unsigned CurrentPart = getUnrollPart(*this);
Type *IndexTy = getGEPIndexTy(State.VF.isScalable(), /*IsReverse*/ false,
CurrentPart, Builder);
@@ -2285,7 +2273,6 @@ void VPVectorPointerRecipe::print(raw_ostream &O, const Twine &Indent,
void VPBlendRecipe::execute(VPTransformState &State) {
assert(isNormalized() && "Expected blend to be normalized!");
- State.setDebugLocFrom(getDebugLoc());
// We know that all PHIs in non-header blocks are converted into
// selects, so we don't have to worry about the insertion order and we
// can just use the builder.
@@ -2367,7 +2354,6 @@ void VPReductionRecipe::execute(VPTransformState &State) {
// Propagate the fast-math flags carried by the underlying instruction.
IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
State.Builder.setFastMathFlags(getFastMathFlags());
- State.setDebugLocFrom(getDebugLoc());
Value *NewVecOp = State.get(getVecOp());
if (VPValue *Cond = getCondOp()) {
Value *NewCond = State.get(Cond, State.VF.isScalar());
@@ -2570,7 +2556,6 @@ void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
#endif
Value *VPScalarCastRecipe ::generate(VPTransformState &State) {
- State.setDebugLocFrom(getDebugLoc());
assert(vputils::onlyFirstLaneUsed(this) &&
"Codegen only implemented for first lane.");
switch (Opcode) {
@@ -2602,7 +2587,6 @@ void VPScalarCastRecipe ::print(raw_ostream &O, const Twine &Indent,
#endif
void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
- State.setDebugLocFrom(getDebugLoc());
assert(State.Lane && "Branch on Mask works only on single instance.");
VPValue *BlockInMask = getOperand(0);
@@ -2628,7 +2612,6 @@ InstructionCost VPBranchOnMaskRecipe::computeCost(ElementCount VF,
}
void VPPredInstPHIRecipe::execute(VPTransformState &State) {
- State.setDebugLocFrom(getDebugLoc());
assert(State.Lane && "Predicated instruction PHI works per instance.");
Instruction *ScalarPredInst =
cast<Instruction>(State.get(getOperand(0), *State.Lane));
@@ -2734,7 +2717,6 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
bool CreateGather = !isConsecutive();
auto &Builder = State.Builder;
- State.setDebugLocFrom(getDebugLoc());
Value *Mask = nullptr;
if (auto *VPMask = getMask()) {
// Mask reversal is only needed for non-all-one (null) masks, as reverse
@@ -2793,7 +2775,6 @@ void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
bool CreateGather = !isConsecutive();
auto &Builder = State.Builder;
- State.setDebugLocFrom(getDebugLoc());
CallInst *NewLI;
Value *EVL = State.get(getEVL(), VPLane(0));
Value *Addr = State.get(getAddr(), !CreateGather);
@@ -2868,7 +2849,6 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {
const Align Alignment = getLoadStoreAlignment(&Ingredient);
auto &Builder = State.Builder;
- State.setDebugLocFrom(getDebugLoc());
Value *Mask = nullptr;
if (auto *VPMask = getMask()) {
@@ -2914,7 +2894,6 @@ void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
const Align Alignment = getLoadStoreAlignment(&Ingredient);
auto &Builder = State.Builder;
- State.setDebugLocFrom(getDebugLoc());
CallInst *NewSI = nullptr;
Value *StoredVal = State.get(StoredValue);
@@ -3717,7 +3696,6 @@ void VPWidenPHIRecipe::execute(VPTransformState &State) {
assert(EnableVPlanNativePath &&
"Non-native vplans are not expected to have VPWidenPHIRecipes.");
- State.setDebugLocFrom(getDebugLoc());
Value *Op0 = State.get(getOperand(0));
Type *VecTy = Op0->getType();
Value *VecPhi = State.Builder.CreatePHI(VecTy, 2, Name);
@@ -3743,7 +3721,6 @@ void VPActiveLaneMaskPHIRecipe::execute(VPTransformState &State) {
PHINode *Phi =
State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
Phi->addIncoming(StartMask, VectorPH);
- Phi->setDebugLoc(getDebugLoc());
State.set(this, Phi);
}
diff --git a/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll b/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
index 15510060e0c6c..e6b8946f10723 100644
--- a/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
+++ b/llvm/test/Transforms/LoopVectorize/dbg-outer-loop-vect.ll
@@ -15,23 +15,23 @@ define void @foo(ptr %h) !dbg !4 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_COND_CLEANUP32:%.*]] ]
; CHECK-NEXT: br label [[FOR_COND5_PREHEADER1:%.*]], !dbg [[DBG21]]
; CHECK: for.cond5.preheader1:
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_BODY]] ], [ [[TMP4:%.*]], [[FOR_COND5_PREHEADER1]] ], !dbg [[DBG34:![0-9]+]]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, [[VECTOR_BODY]] ], [ [[TMP4:%.*]], [[FOR_COND5_PREHEADER1]] ], !dbg [[DBG22:![0-9]+]]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[H]], <4 x i64> [[VEC_PHI]]
-; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> zeroinitializer, <4 x ptr> [[TMP0]], i32 4, <4 x i1> splat (i1 true)), !dbg [[DBG22:![0-9]+]]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, <4 x ptr> [[TMP0]], i64 1, !dbg [[DBG22]]
-; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> splat (i32 1), <4 x ptr> [[TMP1]], i32 4, <4 x i1> splat (i1 true)), !dbg [[DBG22]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, <4 x ptr> [[TMP0]], i64 2, !dbg [[DBG22]]
-; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> splat (i32 2), <4 x ptr> [[TMP2]], i32 4, <4 x i1> splat (i1 true)), !dbg [[DBG22]]
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x ptr> [[TMP0]], i64 3, !dbg [[DBG22]]
-; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> splat (i32 3), <4 x ptr> [[TMP3]], i32 4, <4 x i1> splat (i1 true)), !dbg [[DBG22]]
-; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i64> [[VEC_PHI]], splat (i64 1), !dbg [[DBG24:![0-9]+]]
-; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i64> [[TMP4]], splat (i64 5), !dbg [[DBG25:![0-9]+]]
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0, !dbg [[DBG26:![0-9]+]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP32]], label [[FOR_COND5_PREHEADER1]], !dbg [[DBG26]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> zeroinitializer, <4 x ptr> [[TMP0]], i32 4, <4 x i1> splat (i1 true)), !dbg [[DBG23:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i32, <4 x ptr> [[TMP0]], i64 1, !dbg [[DBG25:![0-9]+]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> splat (i32 1), <4 x ptr> [[TMP1]], i32 4, <4 x i1> splat (i1 true)), !dbg [[DBG23]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, <4 x ptr> [[TMP0]], i64 2, !dbg [[DBG25]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> splat (i32 2), <4 x ptr> [[TMP2]], i32 4, <4 x i1> splat (i1 true)), !dbg [[DBG23]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, <4 x ptr> [[TMP0]], i64 3, !dbg [[DBG25]]
+; CHECK-NEXT: call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> splat (i32 3), <4 x ptr> [[TMP3]], i32 4, <4 x i1> splat (i1 true)), !dbg [[DBG23]]
+; CHECK-NEXT: [[TMP4]] = add nuw nsw <4 x i64> [[VEC_PHI]], splat (i64 1), !dbg [[DBG26:![0-9]+]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <4 x i64> [[TMP4]], splat (i64 5), !dbg [[DBG27:![0-9]+]]
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP5]], i32 0, !dbg [[DBG28:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP32]], label [[FOR_COND5_PREHEADER1]], !dbg [[DBG28]]
; CHECK: vector.latch:
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 20
-; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[SCALAR_PH]], !dbg [[DBG21]]
; CHECK: scalar.ph:
@@ -40,27 +40,27 @@ define void @foo(ptr %h) !dbg !4 {
; CHECK: for.cond1.preheader:
; CHECK-NEXT: [[I_023:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC13:%.*]], [[FOR_COND_CLEANUP3:%.*]] ]
; CHECK-NEXT: #dbg_value(i64 [[I_023]], [[META11]], !DIExpression(), [[META20]])
-; CHECK-NEXT: br label [[FOR_COND5_PREHEADER:%.*]], !dbg [[DBG26]]
+; CHECK-NEXT: br label [[FOR_COND5_PREHEADER:%.*]], !dbg [[DBG28]]
; CHECK: for.cond5.preheader:
-; CHECK-NEXT: [[L_022:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER]] ], [ [[INC10:%.*]], [[FOR_COND5_PREHEADER]] ], !dbg [[DBG34]]
+; CHECK-NEXT: [[L_022:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER]] ], [ [[INC10:%.*]], [[FOR_COND5_PREHEADER]] ], !dbg [[DBG22]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[H]], i64 [[L_022]]
-; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 4, !dbg [[DBG22]]
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, ptr [[TMP10]], i64 1, !dbg [[DBG31:![0-9]+]]
-; CHECK-NEXT: store i32 1, ptr [[ARRAYIDX_1]], align 4, !dbg [[DBG22]]
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr i32, ptr [[TMP10]], i64 2, !dbg [[DBG31]]
-; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX_2]], align 4, !dbg [[DBG22]]
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr i32, ptr [[TMP10]], i64 3, !dbg [[DBG31]]
-; CHECK-NEXT: store i32 3, ptr [[ARRAYIDX_3]], align 4, !dbg [[DBG22]]
-; CHECK-NEXT: [[INC10]] = add nuw nsw i64 [[L_022]], 1, !dbg [[DBG24]]
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC10]], 5, !dbg [[DBG25]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP3]], label [[FOR_COND5_PREHEADER]], !dbg [[DBG26]]
+; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 4, !dbg [[DBG23]]
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr i32, ptr [[TMP10]], i64 1, !dbg [[DBG25]]
+; CHECK-NEXT: store i32 1, ptr [[ARRAYIDX_1]], align 4, !dbg [[DBG23]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr i32, ptr [[TMP10]], i64 2, !dbg [[DBG25]]
+; CHECK-NEXT: store i32 2, ptr [[ARRAYIDX_2]], align 4, !dbg [[DBG23]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr i32, ptr [[TMP10]], i64 3, !dbg [[DBG25]]
+; CHECK-NEXT: store i32 3, ptr [[ARRAYIDX_3]], align 4, !dbg [[DBG23]]
+; CHECK-NEXT: [[INC10]] = add nuw nsw i64 [[L_022]], 1, !dbg [[DBG26]]
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC10]], 5, !dbg [[DBG27]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP3]], label [[FOR_COND5_PREHEADER]], !dbg [[DBG28]]
; CHECK: for.cond.cleanup3:
-; CHECK-NEXT: [[INC13]] = add nuw nsw i64 [[I_023]], 1, !dbg [[DBG32:![0-9]+]]
+; CHECK-NEXT: [[INC13]] = add nuw nsw i64 [[I_023]], 1, !dbg [[DBG33:![0-9]+]]
; CHECK-NEXT: #dbg_value(i64 [[INC13]], [[META11]], !DIExpression(), [[META20]])
-; CHECK-NEXT: [[EXITCOND24_NOT:%.*]] = icmp eq i64 [[INC13]], 23, !dbg [[DBG33:![0-9]+]]
-; CHECK-NEXT: br i1 [[EXITCOND24_NOT]], label [[EXIT]], label [[FOR_COND1_PREHEADER]], !dbg [[DBG21]], !llvm.loop [[LOOP34:![0-9]+]]
+; CHECK-NEXT: [[EXITCOND24_NOT:%.*]] = icmp eq i64 [[INC13]], 23, !dbg [[DBG34:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND24_NOT]], label [[EXIT]], label [[FOR_COND1_PREHEADER]], !dbg [[DBG21]], !llvm.loop [[LOOP35:![0-9]+]]
; CHECK: exit:
-; CHECK-NEXT: ret void, !dbg [[DBG35:![0-9]+]]
+; CHECK-NEXT: ret void, !dbg [[DBG36:![0-9]+]]
;
entry:
call void @llvm.dbg.value(metadata i64 0, metadata !11, metadata !DIExpression()), !dbg !20
@@ -156,18 +156,19 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
; CHECK: [[META19]] = distinct !DILexicalBlock(scope: [[META15]], file: [[META1]], line: 11, column: 5)
; CHECK: [[META20]] = !DILocation(line: 0, scope: [[META12]])
; CHECK: [[DBG21]] = !DILocation(line: 10, column: 3, scope: [[META12]])
-; CHECK: [[DBG22]] = !DILocation(line: 13, column: 11, scope: [[META23:![0-9]+]])
-; CHECK: [[META23]] = distinct !DILexicalBlock(scope: [[META18]], file: [[META1]], line: 12, column: 7)
-; CHECK: [[DBG24]] = !DILocation(line: 11, column: 32, scope: [[META19]])
-; CHECK: [[DBG25]] = !DILocation(line: 11, column: 26, scope: [[META19]])
-; CHECK: [[DBG26]] = !DILocation(line: 11, column: 5, scope: [[META15]])
-; CHECK: [[LOOP27]] = distinct !{[[LOOP27]], [[DBG21]], [[META28:![0-9]+]], [[META29:![0-9]+]], [[META30:![0-9]+]]}
-; CHECK: [[META28]] = !DILocation(line: 13, column: 13, scope: [[META12]])
-; CHECK: [[META29]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META30]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[DBG31]] = !DILocation(line: 13, column: 2, scope: [[META23]])
-; CHECK: [[DBG32]] = !DILocation(line: 10, column: 30, scope: [[META16]])
-; CHECK: [[DBG33]] = !DILocation(line: 10, column: 24, scope: [[META16]])
-; CHECK: [[LOOP34]] = distinct !{[[LOOP34]], [[DBG21]], [[META28]], [[META29]]}
-; CHECK: [[DBG35]] = !DILocation(line: 14, column: 1, scope: [[DBG4]])
+; CHECK: [[DBG22]] = !DILocation(line: 10, column: 5, scope: [[META12]])
+; CHECK: [[DBG23]] = !DILocation(line: 13, column: 11, scope: [[META24:![0-9]+]])
+; CHECK: [[META24]] = distinct !DILexicalBlock(scope: [[META18]], file: [[META1]], line: 12, column: 7)
+; CHECK: [[DBG25]] = !DILocation(line: 13, column: 2, scope: [[META24]])
+; CHECK: [[DBG26]] = !DILocation(line: 11, column: 32, scope: [[META19]])
+; CHECK: [[DBG27]] = !DILocation(line: 11, column: 26, scope: [[META19]])
+; CHECK: [[DBG28]] = !DILocation(line: 11, column: 5, scope: [[META15]])
+; CHECK: [[LOOP29]] = distinct !{[[LOOP29]], [[DBG21]], [[META30:![0-9]+]], [[META31:![0-9]+]], [[META32:![0-9]+]]}
+; CHECK: [[META30]] = !DILocation(line: 13, column: 13, scope: [[META12]])
+; CHECK: [[META31]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META32]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[DBG33]] = !DILocation(line: 10, column: 30, scope: [[META16]])
+; CHECK: [[DBG34]] = !DILocation(line: 10, column: 24, scope: [[META16]])
+; CHECK: [[LOOP35]] = distinct !{[[LOOP35]], [[DBG21]], [[META30]], [[META31]]}
+; CHECK: [[DBG36]] = !DILocation(line: 14, column: 1, scope: [[DBG4]])
;.
diff --git a/llvm/test/Transforms/LoopVectorize/debugloc.ll b/llvm/test/Transforms/LoopVectorize/debugloc.ll
index c31f438feae6e..8fe355c6d567d 100644
--- a/llvm/test/Transforms/LoopVectorize/debugloc.ll
+++ b/llvm/test/Transforms/LoopVectorize/debugloc.ll
@@ -67,8 +67,8 @@ define i32 @test_debug_loc_on_branch_in_loop(ptr noalias %src, ptr noalias %dst)
; CHECK-NEXT: br i1 [[EXT]], label %pred.store.if, label %pred.store.continue, !dbg [[LOC3]]
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
-; CHECK-NEXT: [[GEP:%.+]] = getelementptr inbounds i32, ptr %dst, i64 {{.+}}, !dbg [[LOC3]]
-; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4, !dbg [[LOC3]]
+; CHECK-NEXT: [[GEP:%.+]] = getelementptr inbounds i32, ptr %dst, i64 {{.+}}
+; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
; CHECK-NEXT: br label %pred.store.continue, !dbg [[LOC3]]
; CHECK-EMPTY:
;
@@ -107,7 +107,7 @@ define i32 @test_different_debug_loc_on_replicate_recipe(ptr noalias %src, ptr n
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: [[GEP:%.+]] = getelementptr inbounds i32, ptr %dst, i64 {{.+}}, !dbg [[LOC5:!.+]]
-; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4, !dbg [[LOC5]]
+; CHECK-NEXT: store i32 0, ptr [[GEP]], align 4
; CHECK-NEXT: br label %pred.store.continue, !dbg [[LOC4]]
; CHECK-EMPTY:
;
diff --git a/llvm/test/Transforms/LoopVectorize/if-pred-non-void.ll b/llvm/test/Transforms/LoopVectorize/if-pred-non-void.ll
index fa03c30b8752d..d717a3feed3ea 100644
--- a/llvm/test/Transforms/LoopVectorize/if-pred-non-void.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-pred-non-void.ll
@@ -636,8 +636,8 @@ define void @pr30172(ptr nocapture %asd, ptr nocapture %bsd) !dbg !5 {;
; UNROLL-NO-VF-NEXT: [[TMP13:%.*]] = icmp slt i32 [[TMP5]], 100
; UNROLL-NO-VF-NEXT: [[TMP17:%.*]] = xor i1 [[TMP12]], true, !dbg [[DBG34:![0-9]+]]
; UNROLL-NO-VF-NEXT: [[TMP14:%.*]] = xor i1 [[TMP13]], true, !dbg [[DBG34]]
-; UNROLL-NO-VF-NEXT: [[TMP15:%.*]] = icmp sge i32 [[TMP4]], 200, !dbg [[DBG34]]
-; UNROLL-NO-VF-NEXT: [[TMP16:%.*]] = icmp sge i32 [[TMP5]], 200, !dbg [[DBG34]]
+; UNROLL-NO-VF-NEXT: [[TMP15:%.*]] = icmp sge i32 [[TMP4]], 200
+; UNROLL-NO-VF-NEXT: [[TMP16:%.*]] = icmp sge i32 [[TMP5]], 200
; UNROLL-NO-VF-NEXT: [[TMP18:%.*]] = select i1 [[TMP17]], i1 [[TMP15]], i1 false, !dbg [[DBG35:![0-9]+]]
; UNROLL-NO-VF-NEXT: [[TMP19:%.*]] = select i1 [[TMP14]], i1 [[TMP16]], i1 false, !dbg [[DBG35]]
; UNROLL-NO-VF-NEXT: [[TMP20:%.*]] = or i1 [[TMP18]], [[TMP12]]