[llvm] [VPlan] Perform optimizeMaskToEVL in terms of pattern matching (PR #155394)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 16 09:05:11 PDT 2025
================
@@ -2410,90 +2410,98 @@ void VPlanTransforms::addActiveLaneMask(
HeaderMask->eraseFromParent();
}
+template <typename Op0_t, typename Op1_t> struct RemoveMask_match {
+ Op0_t In;
+ Op1_t &Out;
+
+ RemoveMask_match(const Op0_t &In, Op1_t &Out) : In(In), Out(Out) {}
+
+ template <typename OpTy> bool match(OpTy *V) const {
+ if (m_Specific(In).match(V)) {
+ Out = nullptr;
+ return true;
+ }
+ if (m_LogicalAnd(m_Specific(In), m_VPValue(Out)).match(V))
+ return true;
+ return false;
+ }
+};
+
+/// Match a specific mask \p In, or a logical-and of \p In and another value.
+/// Sets \p Out to that other value if so, or to nullptr if the value is \p In.
+template <typename Op0_t, typename Op1_t>
+static inline RemoveMask_match<Op0_t, Op1_t> m_RemoveMask(const Op0_t &In,
+ Op1_t &Out) {
+ return RemoveMask_match<Op0_t, Op1_t>(In, Out);
+}
+
/// Try to optimize a \p CurRecipe masked by \p HeaderMask to a corresponding
/// EVL-based recipe without the header mask. Returns nullptr if no EVL-based
/// recipe could be created.
/// \p HeaderMask Header Mask.
/// \p CurRecipe Recipe to be transformed.
/// \p TypeInfo VPlan-based type analysis.
-/// \p AllOneMask The vector mask parameter of vector-predication intrinsics.
/// \p EVL The explicit vector length parameter of vector-predication
/// intrinsics.
static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask,
VPRecipeBase &CurRecipe,
- VPTypeAnalysis &TypeInfo,
- VPValue &AllOneMask, VPValue &EVL) {
- // FIXME: Don't transform recipes to EVL recipes if they're not masked by the
- // header mask.
- auto GetNewMask = [&](VPValue *OrigMask) -> VPValue * {
- assert(OrigMask && "Unmasked recipe when folding tail");
- // HeaderMask will be handled using EVL.
- VPValue *Mask;
- if (match(OrigMask, m_LogicalAnd(m_Specific(HeaderMask), m_VPValue(Mask))))
- return Mask;
- return HeaderMask == OrigMask ? nullptr : OrigMask;
- };
+ VPTypeAnalysis &TypeInfo, VPValue &EVL) {
+ VPlan *Plan = CurRecipe.getParent()->getPlan();
+ VPValue *Addr, *Mask, *EndPtr;
/// Adjust any end pointers so that they point to the end of EVL lanes not VF.
- auto GetNewAddr = [&CurRecipe, &EVL](VPValue *Addr) -> VPValue * {
- auto *EndPtr = dyn_cast<VPVectorEndPointerRecipe>(Addr);
- if (!EndPtr)
- return Addr;
- assert(EndPtr->getOperand(1) == &EndPtr->getParent()->getPlan()->getVF() &&
- "VPVectorEndPointerRecipe with non-VF VF operand?");
- assert(
- all_of(EndPtr->users(),
- [](VPUser *U) {
- return cast<VPWidenMemoryRecipe>(U)->isReverse();
- }) &&
- "VPVectorEndPointRecipe not used by reversed widened memory recipe?");
- VPVectorEndPointerRecipe *EVLAddr = EndPtr->clone();
- EVLAddr->insertBefore(&CurRecipe);
- EVLAddr->setOperand(1, &EVL);
- return EVLAddr;
+ auto AdjustEndPtr = [&CurRecipe, &EVL](VPValue *EndPtr) {
+ auto *EVLEndPtr = cast<VPVectorEndPointerRecipe>(EndPtr)->clone();
+ EVLEndPtr->insertBefore(&CurRecipe);
+ EVLEndPtr->setOperand(1, &EVL);
+ return EVLEndPtr;
};
- return TypeSwitch<VPRecipeBase *, VPRecipeBase *>(&CurRecipe)
- .Case<VPWidenLoadRecipe>([&](VPWidenLoadRecipe *L) {
- VPValue *NewMask = GetNewMask(L->getMask());
- VPValue *NewAddr = GetNewAddr(L->getAddr());
- return new VPWidenLoadEVLRecipe(*L, NewAddr, EVL, NewMask);
- })
- .Case<VPWidenStoreRecipe>([&](VPWidenStoreRecipe *S) {
- VPValue *NewMask = GetNewMask(S->getMask());
- VPValue *NewAddr = GetNewAddr(S->getAddr());
- return new VPWidenStoreEVLRecipe(*S, NewAddr, EVL, NewMask);
- })
- .Case<VPInterleaveRecipe>([&](VPInterleaveRecipe *IR) {
- VPValue *NewMask = GetNewMask(IR->getMask());
- return new VPInterleaveEVLRecipe(*IR, EVL, NewMask);
- })
- .Case<VPReductionRecipe>([&](VPReductionRecipe *Red) {
- VPValue *NewMask = GetNewMask(Red->getCondOp());
- return new VPReductionEVLRecipe(*Red, EVL, NewMask);
- })
- .Case<VPInstruction>([&](VPInstruction *VPI) -> VPRecipeBase * {
- VPValue *LHS, *RHS;
- // Transform select with a header mask condition
- // select(header_mask, LHS, RHS)
- // into vector predication merge.
- // vp.merge(all-true, LHS, RHS, EVL)
- if (!match(VPI, m_Select(m_Specific(HeaderMask), m_VPValue(LHS),
- m_VPValue(RHS))))
- return nullptr;
- // Use all true as the condition because this transformation is
- // limited to selects whose condition is a header mask.
- return new VPWidenIntrinsicRecipe(
- Intrinsic::vp_merge, {&AllOneMask, LHS, RHS, &EVL},
- TypeInfo.inferScalarType(LHS), VPI->getDebugLoc());
- })
- .Default([&](VPRecipeBase *R) { return nullptr; });
+ if (match(&CurRecipe,
+ m_Load(m_VPValue(Addr), m_RemoveMask(HeaderMask, Mask))))
+ return new VPWidenLoadEVLRecipe(cast<VPWidenLoadRecipe>(CurRecipe), Addr,
+ EVL, Mask);
+
+ if (match(&CurRecipe,
+ m_ReverseLoad(m_VPValue(EndPtr), m_RemoveMask(HeaderMask, Mask))) &&
+ match(EndPtr, m_VecEndPtr(m_VPValue(Addr), m_Specific(&Plan->getVF()))))
+ return new VPWidenLoadEVLRecipe(cast<VPWidenLoadRecipe>(CurRecipe),
+ AdjustEndPtr(EndPtr), EVL, Mask);
----------------
lukel97 wrote:
> My first question is: why do we need to additionally check whether the address is a VPVectorEndPointer for reverse accesses? Isn’t the whole reason for transforming VPVectorEndPointer(ptr, VF) into VPVectorEndPointer(ptr, EVL) that, once the header mask is reversed, non-active lanes are shifted to the beginning, while the VP intrinsic with EVL can only mask off the tail lanes and not the head lanes?
Exactly, that's why it's not correct to transform a reverse VPWidenLoadRecipe w/ a non-VPVectorEndPointer address to a reverse VPWidenLoadEVLRecipe. Consider the case with an arbitrary address that we don't call `AdjustEndPtr` on:
```
%headermask = [1, 1, 0, 0]
%load = VPWidenLoadRecipe(%ptr, %headermask, reverse=true)
      = reverse([poison, poison, *(ptr + 2), *(ptr + 3)])
      = [*(ptr + 3), *(ptr + 2), poison, poison]
--->
VPWidenLoadEVLRecipe(%ptr, null, reverse=true, evl=2)
  = vp.reverse([*ptr, *(ptr + 1), poison, poison], evl=2)
  = [*(ptr + 1), *ptr, poison, poison]
```
But with a VPVectorEndPointer that we adjust, it's correct:
```
%headermask = [1, 1, 0, 0]
%load = VPWidenLoadRecipe(VPVectorEndPointer(%ptr, VF), %headermask, reverse=true)
      = reverse([poison, poison, *(ptr - (VF - 1) + 2), *(ptr - (VF - 1) + 3)])
      = reverse([poison, poison, *(ptr - 1), *ptr])
      = [*ptr, *(ptr - 1), poison, poison]
--->
VPWidenLoadEVLRecipe(VPVectorEndPointer(%ptr, 2), null, reverse=true, evl=2)
  = vp.reverse([*(ptr - (2 - 1)), *(ptr - (2 - 1) + 1), poison, poison], evl=2)
  = vp.reverse([*(ptr - 1), *ptr, poison, poison], evl=2)
  = [*ptr, *(ptr - 1), poison, poison]
```
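If it helps to sanity-check the offset arithmetic in those two examples, here's a small standalone C++ sketch (plain integer arithmetic, not actual VPlan code; VF = 4 and the header mask [1, 1, 0, 0], i.e. EVL = 2, are just the values from the example). It prints which offsets from %ptr land in the active lanes for the legacy reverse load, the EVL form with the adjusted end pointer, and the EVL form if the end pointer were left unadjusted:
```
#include <cstdio>
#include <vector>

int main() {
  const int VF = 4, EVL = 2; // values from the example above
  const int Poison = -999;   // stand-in for a poison lane

  // (a) Legacy reverse masked load: base = ptr - (VF - 1), the header mask is
  //     reversed before the load, and the loaded vector is reversed afterwards.
  std::vector<int> Loaded(VF, Poison);
  for (int Lane = 0; Lane < VF; ++Lane)
    if (VF - 1 - Lane < EVL)           // reversed header mask [0, 0, 1, 1]
      Loaded[Lane] = -(VF - 1) + Lane; // offset from %ptr read into this lane
  std::vector<int> Legacy(Loaded.rbegin(), Loaded.rend());

  // (b) EVL form with the end pointer rewritten to ptr - (EVL - 1):
  //     vp.load of EVL lanes, then vp.reverse with evl = EVL.
  std::vector<int> EVLLoaded(VF, Poison), EVLRev(VF, Poison);
  for (int Lane = 0; Lane < EVL; ++Lane)
    EVLLoaded[Lane] = -(EVL - 1) + Lane;
  for (int Lane = 0; Lane < EVL; ++Lane)
    EVLRev[Lane] = EVLLoaded[EVL - 1 - Lane];

  // (c) EVL form with the end pointer left at ptr - (VF - 1), i.e. unadjusted.
  std::vector<int> BadLoaded(VF, Poison), BadRev(VF, Poison);
  for (int Lane = 0; Lane < EVL; ++Lane)
    BadLoaded[Lane] = -(VF - 1) + Lane;
  for (int Lane = 0; Lane < EVL; ++Lane)
    BadRev[Lane] = BadLoaded[EVL - 1 - Lane];

  // Active lanes as offsets from %ptr: (a) and (b) print "0 -1", (c) prints
  // "-2 -3", i.e. it reads the wrong elements.
  std::printf("legacy    : %d %d\n", Legacy[0], Legacy[1]);
  std::printf("adjusted  : %d %d\n", EVLRev[0], EVLRev[1]);
  std::printf("unadjusted: %d %d\n", BadRev[0], BadRev[1]);
  return 0;
}
```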
> My second question is: if we keep the check on both isReverse and m_VPVectorEndPointer, do we actually have any cases where a reverse access has a non-VPVectorEndPointer address?
Hopefully not, I think this is an invariant as you suggest. I'll try out the assert that you wrote above and see if it triggers.
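Purely as a hypothetical sketch (not the exact assert from the thread), an invariant check over a `VPWidenMemoryRecipe *MemR` could look roughly like:
```
// Hypothetical: assumed invariant that reversed widened memory recipes always
// take a VPVectorEndPointerRecipe address.
assert((!MemR->isReverse() || isa<VPVectorEndPointerRecipe>(MemR->getAddr())) &&
       "Reversed memory recipe without a VPVectorEndPointerRecipe address");
```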
Enjoy your vacation by the way, see you in October :)
https://github.com/llvm/llvm-project/pull/155394