[llvm] [VPlan] Replace PhiR operand of ComputeRdxResult with VPIRFlags. (PR #174026)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 30 13:58:19 PST 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-transforms
@llvm/pr-subscribers-backend-risc-v
Author: Florian Hahn (fhahn)
<details>
<summary>Changes</summary>
Remove the artificial PhiR operand of ComputeReductionResult, which was only used to look up the recurrence kind and the in-loop and ordered properties.
Instead, encode them as VPIRFlags as suggested by @<!-- -->ayalz in https://github.com/llvm/llvm-project/pull/170223.
This addresses a TODO to make codegen for ComputeReductionResult independent of looking up information from other recipes.
This is NFC w.r.t. codegen; the printing has been improved to include the reduction type and whether it is in-loop/ordered.
---
Patch is 31.73 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/174026.diff
10 Files Affected:
- (modified) llvm/lib/Transforms/Vectorize/LoopVectorize.cpp (+44-16)
- (modified) llvm/lib/Transforms/Vectorize/VPlan.h (+51-4)
- (modified) llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp (+21-14)
- (modified) llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp (+35-16)
- (modified) llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp (+1-1)
- (modified) llvm/lib/Transforms/Vectorize/VPlanUtils.h (+1-1)
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll (+4-4)
- (modified) llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll (+4-4)
- (modified) llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll (+1-1)
- (modified) llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll (+13-13)
``````````diff
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f90937ecedad9..88b19a26e98a0 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7305,8 +7305,20 @@ static void fixReductionScalarResumeWhenVectorizingEpilog(
EpiRedResult->getOpcode() != VPInstruction::ComputeFindIVResult))
return;
- auto *EpiRedHeaderPhi =
- cast<VPReductionPHIRecipe>(EpiRedResult->getOperand(0));
+ VPReductionPHIRecipe *EpiRedHeaderPhi;
+ if (EpiRedResult->getOpcode() == VPInstruction::ComputeReductionResult) {
+ // Find the reduction phi by looking at the other user of operand 0.
+ VPValue *Op = EpiRedResult->getOperand(0);
+ auto *OtherUser = *find_if(
+ Op->users(), [EpiRedResult](VPUser *U) { return U != EpiRedResult; });
+ if (auto *Phi = dyn_cast<VPReductionPHIRecipe>(OtherUser))
+ EpiRedHeaderPhi = Phi;
+ else // For truncated reductions, look through the cast.
+ EpiRedHeaderPhi = cast<VPReductionPHIRecipe>(
+ cast<VPWidenCastRecipe>(OtherUser)->getSingleUser());
+ } else {
+ EpiRedHeaderPhi = cast<VPReductionPHIRecipe>(EpiRedResult->getOperand(0));
+ }
RecurKind Kind = EpiRedHeaderPhi->getRecurrenceKind();
Value *MainResumeValue;
if (auto *VPI = dyn_cast<VPInstruction>(EpiRedHeaderPhi->getStartValue())) {
@@ -8726,13 +8738,15 @@ void LoopVectorizationPlanner::addReductionResultComputation(
Builder.createNaryOp(VPInstruction::ComputeAnyOfResult,
{PhiR, Start, NewExitingVPV}, ExitDL);
} else {
- VPIRFlags Flags =
+ FastMathFlags FMFs =
RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurrenceKind)
- ? VPIRFlags(RdxDesc.getFastMathFlags())
- : VPIRFlags();
+ ? RdxDesc.getFastMathFlags()
+ : FastMathFlags();
+ VPIRFlags Flags(RecurrenceKind, PhiR->isOrdered(), PhiR->isInLoop(),
+ FMFs);
FinalReductionResult =
Builder.createNaryOp(VPInstruction::ComputeReductionResult,
- {PhiR, NewExitingVPV}, Flags, ExitDL);
+ {NewExitingVPV}, Flags, ExitDL);
}
// If the vector reduction can be performed in a smaller type, we truncate
// then extend the loop exit value to enable InstCombine to evaluate the
@@ -8760,8 +8774,8 @@ void LoopVectorizationPlanner::addReductionResultComputation(
PhiR->setOperand(1, Extnd->getVPSingleValue());
// Update ComputeReductionResult with the truncated exiting value and
- // extend its result.
- FinalReductionResult->setOperand(1, Trunc);
+ // extend its result. Operand 0 is the first reduction part.
+ FinalReductionResult->setOperand(0, Trunc);
FinalReductionResult =
Builder.createScalarCast(ExtendOpc, FinalReductionResult, PhiTy, {});
}
@@ -9378,14 +9392,28 @@ static SmallVector<Instruction *> preparePlanForEpilogueVectorLoop(
Value *ResumeV = nullptr;
// TODO: Move setting of resume values to prepareToExecute.
if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
- auto *RdxResult =
- cast<VPInstruction>(*find_if(ReductionPhi->users(), [](VPUser *U) {
- auto *VPI = dyn_cast<VPInstruction>(U);
- return VPI &&
- (VPI->getOpcode() == VPInstruction::ComputeAnyOfResult ||
- VPI->getOpcode() == VPInstruction::ComputeReductionResult ||
- VPI->getOpcode() == VPInstruction::ComputeFindIVResult);
- }));
+ // Find the reduction result by looking at users of the phi, its backedge
+ // value, or for truncated reductions, the trunc feeding the backedge.
+ auto FindReductionResult = [](VPValue *V) -> VPInstruction * {
+ for (VPUser *U : V->users())
+ if (auto *VPI = dyn_cast<VPInstruction>(U))
+ if (VPI->getOpcode() == VPInstruction::ComputeAnyOfResult ||
+ VPI->getOpcode() == VPInstruction::ComputeReductionResult ||
+ VPI->getOpcode() == VPInstruction::ComputeFindIVResult)
+ return VPI;
+ return nullptr;
+ };
+ VPInstruction *RdxResult = FindReductionResult(ReductionPhi);
+ if (!RdxResult)
+ RdxResult = FindReductionResult(ReductionPhi->getBackedgeValue());
+ // For truncated reductions, look through the extension on the backedge.
+ VPValue *TruncVal;
+ if (!RdxResult &&
+ VPlanPatternMatch::match(ReductionPhi->getBackedgeValue(),
+ VPlanPatternMatch::m_ZExtOrSExt(
+ VPlanPatternMatch::m_VPValue(TruncVal))))
+ RdxResult = FindReductionResult(TruncVal);
+ assert(RdxResult && "expected to find reduction result");
ResumeV = cast<PHINode>(ReductionPhi->getUnderlyingInstr())
->getIncomingValueForBlock(L->getLoopPreheader());
RecurKind RK = ReductionPhi->getRecurrenceKind();
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index e2dfc4678c6d0..798e107c0eb16 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -617,6 +617,7 @@ class VPIRFlags {
GEPOp,
FPMathOp,
NonNegOp,
+ ReductionOp,
Other
};
@@ -666,6 +667,18 @@ class VPIRFlags {
CmpInst::Predicate Pred;
FastMathFlagsTy FMFs;
};
+ /// Holds reduction-specific flags: RecurKind, IsOrdered, IsInLoop, and FMFs.
+ struct ReductionFlagsTy {
+ unsigned char Kind : 6; // RecurKind has ~26 values, needs 5 bits
+ unsigned char IsOrdered : 1;
+ unsigned char IsInLoop : 1;
+ FastMathFlagsTy FMFs;
+
+ ReductionFlagsTy(RecurKind Kind, bool IsOrdered, bool IsInLoop,
+ FastMathFlags FMFs)
+ : Kind(static_cast<unsigned char>(Kind)), IsOrdered(IsOrdered),
+ IsInLoop(IsInLoop), FMFs(FMFs) {}
+ };
OperationType OpType;
@@ -679,6 +692,7 @@ class VPIRFlags {
NonNegFlagsTy NonNegFlags;
FastMathFlagsTy FMFs;
FCmpFlagsTy FCmpFlags;
+ ReductionFlagsTy ReductionFlags;
unsigned AllFlags;
};
@@ -746,6 +760,10 @@ class VPIRFlags {
VPIRFlags(GEPNoWrapFlags GEPFlags)
: OpType(OperationType::GEPOp), GEPFlags(GEPFlags) {}
+ VPIRFlags(RecurKind Kind, bool IsOrdered, bool IsInLoop, FastMathFlags FMFs)
+ : OpType(OperationType::ReductionOp),
+ ReductionFlags(Kind, IsOrdered, IsInLoop, FMFs) {}
+
void transferFlags(VPIRFlags &Other) {
OpType = Other.OpType;
AllFlags = Other.AllFlags;
@@ -787,6 +805,7 @@ class VPIRFlags {
break;
case OperationType::Cmp:
case OperationType::Other:
+ case OperationType::ReductionOp:
break;
}
}
@@ -828,6 +847,7 @@ class VPIRFlags {
break;
case OperationType::Cmp:
case OperationType::Other:
+ case OperationType::ReductionOp:
break;
}
}
@@ -856,7 +876,8 @@ class VPIRFlags {
/// Returns true if the recipe has fast-math flags.
bool hasFastMathFlags() const {
- return OpType == OperationType::FPMathOp || OpType == OperationType::FCmp;
+ return OpType == OperationType::FPMathOp || OpType == OperationType::FCmp ||
+ OpType == OperationType::ReductionOp;
}
LLVM_ABI_FOR_TEST FastMathFlags getFastMathFlags() const;
@@ -898,13 +919,39 @@ class VPIRFlags {
return DisjointFlags.IsDisjoint;
}
+ RecurKind getRecurKind() const {
+ assert(OpType == OperationType::ReductionOp &&
+ "recipe doesn't have reduction flags");
+ return static_cast<RecurKind>(ReductionFlags.Kind);
+ }
+
+ bool isReductionOrdered() const {
+ assert(OpType == OperationType::ReductionOp &&
+ "recipe doesn't have reduction flags");
+ return ReductionFlags.IsOrdered;
+ }
+
+ bool isReductionInLoop() const {
+ assert(OpType == OperationType::ReductionOp &&
+ "recipe doesn't have reduction flags");
+ return ReductionFlags.IsInLoop;
+ }
+
private:
- /// Get a reference to the fast-math flags for FPMathOp or FCmp.
+ /// Get a reference to the fast-math flags for FPMathOp, FCmp or ReductionOp.
FastMathFlagsTy &getFMFsRef() {
- return OpType == OperationType::FCmp ? FCmpFlags.FMFs : FMFs;
+ if (OpType == OperationType::FCmp)
+ return FCmpFlags.FMFs;
+ if (OpType == OperationType::ReductionOp)
+ return ReductionFlags.FMFs;
+ return FMFs;
}
const FastMathFlagsTy &getFMFsRef() const {
- return OpType == OperationType::FCmp ? FCmpFlags.FMFs : FMFs;
+ if (OpType == OperationType::FCmp)
+ return FCmpFlags.FMFs;
+ if (OpType == OperationType::ReductionOp)
+ return ReductionFlags.FMFs;
+ return FMFs;
}
public:
diff --git a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
index 0a7e09c2d7552..08897b1ab5ca1 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
@@ -1228,12 +1228,23 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
// If we exit early due to NaNs, compute the final reduction result based on
// the reduction phi at the beginning of the last vector iteration.
+ VPValue *BackedgeVal = RedPhiR->getBackedgeValue();
auto *RdxResult =
- findUserOf<VPInstruction::ComputeReductionResult>(RedPhiR);
+ findUserOf<VPInstruction::ComputeReductionResult>(BackedgeVal);
+
+ // Look through selects inserted for tail folding.
+ if (!RdxResult) {
+ VPReductionPHIRecipe *PhiR = RedPhiR;
+ auto *SelR = cast<VPSingleDefRecipe>(*find_if(
+ BackedgeVal->users(), [PhiR](VPUser *U) { return U != PhiR; }));
+ RdxResult = findUserOf<VPInstruction::ComputeReductionResult>(SelR);
+ if (!RdxResult)
+ return false;
+ }
auto *NewSel = MiddleBuilder.createSelect(AnyNaNLane, RedPhiR,
- RdxResult->getOperand(1));
- RdxResult->setOperand(1, NewSel);
+ RdxResult->getOperand(0));
+ RdxResult->setOperand(0, NewSel);
assert(!RdxResults.contains(RdxResult) && "RdxResult already used");
RdxResults.insert(RdxResult);
}
@@ -1319,8 +1330,7 @@ bool VPlanTransforms::handleMultiUseReductions(VPlan &Plan) {
if (!match(MinMaxOp, m_Intrinsic(ExpectedIntrinsicID)))
return false;
- // MinMaxOp must have 2 users: 1) MinMaxPhiR and 2) ComputeReductionResult
- // (asserted below).
+ // MinMaxOp must have 2 users: 1) MinMaxPhiR and 2) ComputeReductionResult.
assert(MinMaxOp->getNumUsers() == 2 &&
"MinMaxOp must have exactly 2 users");
VPValue *MinMaxOpValue = MinMaxOp->getOperand(0);
@@ -1339,20 +1349,17 @@ bool VPlanTransforms::handleMultiUseReductions(VPlan &Plan) {
if (MinMaxOpValue != CmpOpB)
Pred = CmpInst::getSwappedPredicate(Pred);
- // MinMaxPhiR must have exactly 3 users:
+ // MinMaxPhiR must have exactly 2 users:
// * MinMaxOp,
- // * Cmp (that's part of a FindLastIV chain),
- // * ComputeReductionResult.
- if (MinMaxPhiR->getNumUsers() != 3)
+ // * Cmp (that's part of a FindLastIV chain).
+ if (MinMaxPhiR->getNumUsers() != 2)
return false;
VPInstruction *MinMaxResult =
- findUserOf<VPInstruction::ComputeReductionResult>(MinMaxPhiR);
+ findUserOf<VPInstruction::ComputeReductionResult>(MinMaxOp);
assert(is_contained(MinMaxPhiR->users(), MinMaxOp) &&
"one user must be MinMaxOp");
- assert(MinMaxResult && "MinMaxResult must be a user of MinMaxPhiR");
- assert(is_contained(MinMaxOp->users(), MinMaxResult) &&
- "MinMaxResult must be a user of MinMaxOp (and of MinMaxPhiR");
+ assert(MinMaxResult && "MinMaxResult must be a user of MinMaxOp");
// Cmp must be used by the select of a FindLastIV chain.
VPValue *Sel = dyn_cast<VPSingleDefRecipe>(Cmp->getSingleUser());
@@ -1429,7 +1436,7 @@ bool VPlanTransforms::handleMultiUseReductions(VPlan &Plan) {
FindIVResult->getIterator());
VPBuilder B(FindIVResult);
- VPValue *MinMaxExiting = MinMaxResult->getOperand(1);
+ VPValue *MinMaxExiting = MinMaxResult->getOperand(0);
auto *FinalMinMaxCmp =
B.createICmp(CmpInst::ICMP_EQ, MinMaxExiting, MinMaxResult);
VPValue *Sentinel = FindIVResult->getOperand(2);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 991e32a24fe2d..11477197dcd49 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -348,6 +348,16 @@ void VPIRFlags::intersectFlags(const VPIRFlags &Other) {
case OperationType::Cmp:
assert(CmpPredicate == Other.CmpPredicate && "Cannot drop CmpPredicate");
break;
+ case OperationType::ReductionOp:
+ assert(ReductionFlags.Kind == Other.ReductionFlags.Kind &&
+ "Cannot change RecurKind");
+ assert(ReductionFlags.IsOrdered == Other.ReductionFlags.IsOrdered &&
+ "Cannot change IsOrdered");
+ assert(ReductionFlags.IsInLoop == Other.ReductionFlags.IsInLoop &&
+ "Cannot change IsInLoop");
+ getFMFsRef().NoNaNs &= Other.getFMFsRef().NoNaNs;
+ getFMFsRef().NoInfs &= Other.getFMFsRef().NoInfs;
+ break;
case OperationType::Other:
assert(AllFlags == Other.AllFlags && "Cannot drop other flags");
break;
@@ -355,7 +365,8 @@ void VPIRFlags::intersectFlags(const VPIRFlags &Other) {
}
FastMathFlags VPIRFlags::getFastMathFlags() const {
- assert((OpType == OperationType::FPMathOp || OpType == OperationType::FCmp) &&
+ assert((OpType == OperationType::FPMathOp || OpType == OperationType::FCmp ||
+ OpType == OperationType::ReductionOp) &&
"recipe doesn't have fast math flags");
const FastMathFlagsTy &F = getFMFsRef();
FastMathFlags Res;
@@ -455,7 +466,6 @@ unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) {
case Instruction::Store:
case VPInstruction::BranchOnCount:
case VPInstruction::BranchOnTwoConds:
- case VPInstruction::ComputeReductionResult:
case VPInstruction::ExtractLane:
case VPInstruction::FirstOrderRecurrenceSplice:
case VPInstruction::LogicalAnd:
@@ -475,6 +485,7 @@ unsigned VPInstruction::getNumOperandsForOpcode(unsigned Opcode) {
case Instruction::PHI:
case Instruction::Switch:
case VPInstruction::AnyOf:
+ case VPInstruction::ComputeReductionResult:
case VPInstruction::FirstActiveLane:
case VPInstruction::LastActiveLane:
case VPInstruction::SLPLoad:
@@ -748,21 +759,16 @@ Value *VPInstruction::generate(VPTransformState &State) {
Sentinel);
}
case VPInstruction::ComputeReductionResult: {
- // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
- // and will be removed by breaking up the recipe further.
- auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
- // Get its reduction variable descriptor.
-
- RecurKind RK = PhiR->getRecurrenceKind();
+ RecurKind RK = getRecurKind();
+ bool IsOrdered = isReductionOrdered();
+ bool IsInLoop = isReductionInLoop();
assert(!RecurrenceDescriptor::isFindIVRecurrenceKind(RK) &&
"should be handled by ComputeFindIVResult");
- // The recipe's operands are the reduction phi, followed by one operand for
- // each part of the reduction.
- unsigned UF = getNumOperands() - 1;
+ unsigned UF = getNumOperands();
VectorParts RdxParts(UF);
for (unsigned Part = 0; Part < UF; ++Part)
- RdxParts[Part] = State.get(getOperand(1 + Part), PhiR->isInLoop());
+ RdxParts[Part] = State.get(getOperand(Part), IsInLoop);
IRBuilderBase::FastMathFlagGuard FMFG(Builder);
if (hasFastMathFlags())
@@ -770,7 +776,7 @@ Value *VPInstruction::generate(VPTransformState &State) {
// Reduce all of the unrolled parts into a single vector.
Value *ReducedPartRdx = RdxParts[0];
- if (PhiR->isOrdered()) {
+ if (IsOrdered) {
ReducedPartRdx = RdxParts[UF - 1];
} else {
// Floating-point operations should have some FMF to enable the reduction.
@@ -793,7 +799,7 @@ Value *VPInstruction::generate(VPTransformState &State) {
// Create the reduction after the loop. Note that inloop reductions create
// the target reduction in the loop using a Reduction recipe.
- if (State.VF.isVector() && !PhiR->isInLoop()) {
+ if (State.VF.isVector() && !IsInLoop) {
// TODO: Support in-order reductions based on the recurrence descriptor.
// All ops in the reduction inherit fast-math-flags from the recurrence
// descriptor.
@@ -2020,14 +2026,15 @@ bool VPIRFlags::flagsValidForOpcode(unsigned Opcode) const {
Opcode == Instruction::FRem || Opcode == Instruction::FPExt ||
Opcode == Instruction::FPTrunc || Opcode == Instruction::Select ||
Opcode == VPInstruction::WideIVStep ||
- Opcode == VPInstruction::ReductionStartVector ||
- Opcode == VPInstruction::ComputeReductionResult;
+ Opcode == VPInstruction::ReductionStartVector;
case OperationType::FCmp:
return Opcode == Instruction::FCmp;
case OperationType::NonNegOp:
return Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP;
case OperationType::Cmp:
return Opcode == Instruction::FCmp || Opcode == Instruction::ICmp;
+ case OperationType::ReductionOp:
+ return Opcode == VPInstruction::ComputeReductionResult;
case OperationType::Other:
return true;
}
@@ -2080,6 +2087,18 @@ void VPIRFlags::printFlags(raw_ostream &O) const {
if (NonNegFlags.NonNeg)
O << " nneg";
break;
+ case OperationType::ReductionOp: {
+ RecurKind RK = static_cast<RecurKind>(ReductionFlags.Kind);
+ O << " ("
+ << Instruction::getOpcodeName(RecurrenceDescriptor::getOpcode(RK));
+ if (ReductionFlags.IsInLoop)
+ O << ", in-loop";
+ if (ReductionFlags.IsOrdered)
+ O << ", ordered";
+ O << ")";
+ getFastMathFlags().print(O);
+ break;
+ }
case OperationType::Other:
break;
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
index 8198945764936..2488951e844c6 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
@@ -371,7 +371,7 @@ void UnrollState::unrollBlock(VPBlockBase *VPB) {
match(&R, m_VPInstruction<VPInstruction::ComputeAnyOfResult>(
m_VPValue(), m_VPValue(), m_VPValue(Op1))) ||
match(&R, m_VPInstruction<VPInstruction::ComputeReductionResult>(
- m_VPValue(), m_VPValue(Op1))) ||
+ m_VPValue(Op1))) ||
match(&R, m_VPInstruction<VPInstruction::ComputeFindIVResult>(
m_VPValue(), m_VPValue(), m_VPValue(), m_VPValue(Op1)))) {
addUniformForAllParts(cast<VPInstruction>(&R));
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.h b/llvm/lib/Transforms/Vectorize/VPlanUtils.h
index 4e7ed1f5a4ab7..e3c2a062a8b97 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUtils.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.h
@@ -103,6 +103,7 @@ inline VPIRFlags getFlagsFromIndDesc(const InductionDescriptor &ID) {
"Expected int induction");
return VPIRFlags::WrapFlagsTy(false, false);
}
+
} // namespace vputils
//===----------------------------------------------------------------------===//
@@ -254,7 +255,6 @@ class VPBlockUtils {
/// Returns true if \p VPB is a loop latch, using isHeader().
static bool isLatch(const VPBlockBase *VPB, const VPDominatorTree &VPDT);
};
-
} // namespace llvm
#endif
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
index 32ee9a0142a7b..65a1a6e64a40e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
@@ -39,7 +39,7 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) "target-features"="+neon,+do
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
-; CH...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/174026
More information about the llvm-commits
mailing list