[llvm] [NFC][LV]: Differentiate between Pred and Unpred masked operations. (PR #169509)
Hassnaa Hamdi via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 25 11:07:52 PST 2025
https://github.com/hassnaaHamdi updated https://github.com/llvm/llvm-project/pull/169509
From 61441fe6581a4212dd80d1b4aa826ed71895a4d6 Mon Sep 17 00:00:00 2001
From: Hassnaa Hamdi <hassnaa.hamdi at arm.com>
Date: Tue, 25 Nov 2025 15:21:48 +0000
Subject: [PATCH] [NFC][LV]: Differentiate between Pred and Unpred operations
---
.../Vectorize/LoopVectorizationLegality.h | 18 +++++++----
.../Vectorize/LoopVectorizationLegality.cpp | 6 ++--
.../Transforms/Vectorize/LoopVectorize.cpp | 30 ++++++++++---------
3 files changed, 32 insertions(+), 22 deletions(-)
diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index ecbd0ef7df5e5..3827bf3a359e3 100644
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -428,8 +428,10 @@ class LoopVectorizationLegality {
/// Returns true if vector representation of the instruction \p I
- /// requires mask.
- bool isMaskRequired(const Instruction *I) const {
- return MaskedOp.contains(I);
+ /// requires a mask. \p LoopPredicated selects which set is consulted:
+ /// the operations masked because the loop is predicated (tail-folded),
+ /// or those masked because they are in a conditionally executed block.
+ bool isMaskRequired(const Instruction *I, bool LoopPredicated) const {
+ if (LoopPredicated)
+ return PredMaskedOps.contains(I);
+ return UnpredMaskedOps.contains(I);
}
/// Returns true if there is at least one function call in the loop which
@@ -705,9 +707,15 @@ class LoopVectorizationLegality {
AssumptionCache *AC;
/// While vectorizing these instructions we have to generate a
- /// call to the appropriate masked intrinsic or drop them in case of
- /// conditional assumes.
- SmallPtrSet<const Instruction *, 8> MaskedOp;
+ /// call to the appropriate masked intrinsic or drop them in case of
+ /// conditional assumes.
+ /// To differentiate between operations that need masking because they
+ /// are in a conditionally executed block and those that need masking
+ /// because the loop is predicated, we keep two sets:
+ /// 1) UnpredMaskedOps - instructions that need masking because they are
+ /// in a conditionally executed block.
+ /// 2) PredMaskedOps - instructions that need masking because the loop
+ /// is predicated.
+ SmallPtrSet<const Instruction *, 8> UnpredMaskedOps;
+ SmallPtrSet<const Instruction *, 8> PredMaskedOps;
/// Contains all identified histogram operations, which are sequences of
/// load -> update -> store instructions where multiple lanes in a vector
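To make the intended split concrete, here is a minimal standalone sketch of the two-set query. This is plain C++ with std::set standing in for SmallPtrSet; LegalitySketch and everything around it are illustrative assumptions, not LLVM code:

    #include <set>

    struct Instruction; // opaque stand-in for llvm::Instruction

    // Two distinct reasons an operation may need a mask, kept in two
    // sets and selected by whether the loop itself is predicated.
    struct LegalitySketch {
      std::set<const Instruction *> UnpredMaskedOps; // conditional block
      std::set<const Instruction *> PredMaskedOps;   // tail-folded loop

      bool isMaskRequired(const Instruction *I, bool LoopPredicated) const {
        return LoopPredicated ? PredMaskedOps.count(I) != 0
                              : UnpredMaskedOps.count(I) != 0;
      }
    };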
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index 86e742ca5fec1..2f1996dc288f5 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -1590,7 +1590,7 @@ bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
// We must be able to predicate all blocks that need to be predicated.
if (blockNeedsPredication(BB) &&
- !blockCanBePredicated(BB, SafePointers, MaskedOp)) {
+ !blockCanBePredicated(BB, SafePointers, UnpredMaskedOps)) {
reportVectorizationFailure(
"Control flow cannot be substituted for a select", "NoCFGForSelect",
ORE, TheLoop, BB->getTerminator());
@@ -2147,11 +2147,11 @@ bool LoopVectorizationLegality::canFoldTailByMasking() const {
void LoopVectorizationLegality::prepareToFoldTailByMasking() {
// The list of pointers that we can safely read and write to remains empty.
SmallPtrSet<Value *, 8> SafePointers;
-
// Mark all blocks for predication, including those that ordinarily do not
// need predication such as the header block.
for (BasicBlock *BB : TheLoop->blocks()) {
- [[maybe_unused]] bool R = blockCanBePredicated(BB, SafePointers, MaskedOp);
+ [[maybe_unused]] bool R =
+ blockCanBePredicated(BB, SafePointers, PredMaskedOps);
assert(R && "Must be able to predicate block when tail-folding.");
}
}
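The two call sites above populate the sets on different legality paths. Roughly sketched below; blockNeedsPredication and blockCanBePredicated here are simplified stand-ins assumed for illustration, not the real interfaces:

    #include <set>
    #include <vector>

    struct Instruction;
    struct BasicBlock { std::vector<const Instruction *> MemOps; };

    // Stand-in for the real helper: records each memory op of BB into Ops.
    static bool blockCanBePredicated(const BasicBlock *BB,
                                     std::set<const Instruction *> &Ops) {
      for (const Instruction *I : BB->MemOps)
        Ops.insert(I);
      return true;
    }

    // If-conversion legality: only blocks that actually need predication
    // contribute, and their ops land in UnpredMaskedOps.
    static void ifConvertPath(const std::vector<BasicBlock *> &Blocks,
                              bool (*blockNeedsPredication)(const BasicBlock *),
                              std::set<const Instruction *> &UnpredMaskedOps) {
      for (const BasicBlock *BB : Blocks)
        if (blockNeedsPredication(BB))
          (void)blockCanBePredicated(BB, UnpredMaskedOps);
    }

    // Tail-folding preparation: every block is predicated, header
    // included, and the ops land in PredMaskedOps.
    static void tailFoldPath(const std::vector<BasicBlock *> &Blocks,
                             std::set<const Instruction *> &PredMaskedOps) {
      for (const BasicBlock *BB : Blocks)
        (void)blockCanBePredicated(BB, PredMaskedOps);
    }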
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 8013c47f53db0..cd9e5ef3eaf0b 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2838,7 +2838,8 @@ bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
// TODO: We can use the loop-preheader as context point here and get
// context sensitive reasoning for isSafeToSpeculativelyExecute.
if (isSafeToSpeculativelyExecute(I) ||
- (isa<LoadInst, StoreInst, CallInst>(I) && !Legal->isMaskRequired(I)) ||
+ (isa<LoadInst, StoreInst, CallInst>(I) &&
+ !Legal->isMaskRequired(I, foldTailByMasking())) ||
isa<BranchInst, SwitchInst, PHINode, AllocaInst>(I))
return false;
@@ -2863,7 +2864,7 @@ bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
case Instruction::Call:
// Side-effects of a Call are assumed to be non-invariant, needing a
// (fold-tail) mask.
- assert(Legal->isMaskRequired(I) &&
+ assert(Legal->isMaskRequired(I, foldTailByMasking()) &&
"should have returned earlier for calls not needing a mask");
return true;
case Instruction::Load:
@@ -2991,7 +2992,7 @@ bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
// load, or any gaps in a store-access).
bool PredicatedAccessRequiresMasking =
blockNeedsPredicationForAnyReason(I->getParent()) &&
- Legal->isMaskRequired(I);
+ Legal->isMaskRequired(I, foldTailByMasking());
bool LoadAccessWithGapsRequiresEpilogMasking =
isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
!isScalarEpilogueAllowed();
@@ -5250,7 +5251,7 @@ LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
"Stride should be 1 or -1 for consecutive memory access");
const Align Alignment = getLoadStoreAlignment(I);
InstructionCost Cost = 0;
- if (Legal->isMaskRequired(I)) {
+ if (Legal->isMaskRequired(I, foldTailByMasking())) {
unsigned IID = I->getOpcode() == Instruction::Load
? Intrinsic::masked_load
: Intrinsic::masked_store;
@@ -5314,9 +5315,10 @@ LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
PtrTy = toVectorTy(PtrTy, VF);
return TTI.getAddressComputationCost(PtrTy, nullptr, nullptr, CostKind) +
- TTI.getGatherScatterOpCost(I->getOpcode(), VectorTy, Ptr,
- Legal->isMaskRequired(I), Alignment,
- CostKind, I);
+ TTI.getGatherScatterOpCost(
+ I->getOpcode(), VectorTy, Ptr,
+ Legal->isMaskRequired(I, foldTailByMasking()), Alignment, CostKind,
+ I);
}
InstructionCost
@@ -5345,12 +5347,12 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
(isa<StoreInst>(I) && !Group->isFull());
InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
- Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
- UseMaskForGaps);
+ Group->getAlign(), AS, CostKind,
+ Legal->isMaskRequired(I, foldTailByMasking()), UseMaskForGaps);
if (Group->isReverse()) {
// TODO: Add support for reversed masked interleaved access.
- assert(!Legal->isMaskRequired(I) &&
+ assert(!Legal->isMaskRequired(I, foldTailByMasking()) &&
"Reverse masked interleaved access not supported.");
Cost += Group->getNumMembers() *
TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy,
@@ -5888,7 +5890,7 @@ void LoopVectorizationCostModel::setVectorizedCallDecision(ElementCount VF) {
continue;
}
- bool MaskRequired = Legal->isMaskRequired(CI);
+ bool MaskRequired = Legal->isMaskRequired(CI, foldTailByMasking());
// Compute corresponding vector type for return value and arguments.
Type *RetTy = toVectorizedTy(ScalarRetTy, VF);
for (Type *ScalarTy : ScalarTys)
@@ -7578,7 +7580,7 @@ VPWidenMemoryRecipe *VPRecipeBuilder::tryToWidenMemory(VPInstruction *VPI,
return nullptr;
VPValue *Mask = nullptr;
- if (Legal->isMaskRequired(I))
+ if (Legal->isMaskRequired(I, CM.foldTailByMasking()))
Mask = getBlockInMask(Builder.getInsertBlock());
// Determine if the pointer operand of the access is either consecutive or
@@ -7796,7 +7798,7 @@ VPSingleDefRecipe *VPRecipeBuilder::tryToWidenCall(VPInstruction *VPI,
// vector variant at this VF requires a mask, so we synthesize an
// all-true mask.
VPValue *Mask = nullptr;
- if (Legal->isMaskRequired(CI))
+ if (Legal->isMaskRequired(CI, CM.foldTailByMasking()))
Mask = getBlockInMask(Builder.getInsertBlock());
else
Mask = Plan.getOrAddLiveIn(
@@ -7919,7 +7921,7 @@ VPHistogramRecipe *VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
// In case of predicated execution (due to tail-folding, or conditional
// execution, or both), pass the relevant mask.
- if (Legal->isMaskRequired(HI->Store))
+ if (Legal->isMaskRequired(HI->Store, CM.foldTailByMasking()))
HGramOps.push_back(getBlockInMask(Builder.getInsertBlock()));
return new VPHistogramRecipe(Opcode, HGramOps, VPI->getDebugLoc());
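Taken together, the same instruction can answer the mask query differently depending on whether the tail is folded by masking, which is what every cost-model call site above now passes in. A self-contained illustration of that query shape; everything here is an assumed simplification, not LLVM code:

    #include <cassert>
    #include <set>

    struct Instruction {};

    struct LegalitySketch {
      std::set<const Instruction *> UnpredMaskedOps;
      std::set<const Instruction *> PredMaskedOps;
      bool isMaskRequired(const Instruction *I, bool LoopPredicated) const {
        return LoopPredicated ? PredMaskedOps.count(I) != 0
                              : UnpredMaskedOps.count(I) != 0;
      }
    };

    int main() {
      Instruction CondLoad;  // load inside a conditionally executed block
      Instruction PlainLoad; // unconditional load; masked only when
                             // the whole loop is predicated

      LegalitySketch Legal;
      Legal.UnpredMaskedOps.insert(&CondLoad);
      Legal.PredMaskedOps.insert(&CondLoad);
      Legal.PredMaskedOps.insert(&PlainLoad);

      // Mirrors the call sites in the patch, where the flag comes from
      // foldTailByMasking().
      assert(Legal.isMaskRequired(&PlainLoad, /*LoopPredicated=*/true));
      assert(!Legal.isMaskRequired(&PlainLoad, /*LoopPredicated=*/false));
      assert(Legal.isMaskRequired(&CondLoad, /*LoopPredicated=*/false));
      return 0;
    }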