[llvm] [VPlan] Perform optimizeMaskToEVL in terms of pattern matching (PR #155394)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 9 00:51:47 PDT 2025


================
@@ -587,6 +588,79 @@ m_DerivedIV(const Op0_t &Op0, const Op1_t &Op1, const Op2_t &Op2) {
   return VPDerivedIV_match<Op0_t, Op1_t, Op2_t>({Op0, Op1, Op2});
 }
 
+template <typename Addr_t, typename Mask_t, bool Reverse> struct Load_match {
+  Addr_t Addr;
+  Mask_t Mask;
+
+  Load_match(Addr_t Addr, Mask_t Mask) : Addr(Addr), Mask(Mask) {}
+
+  template <typename OpTy> bool match(const OpTy *V) const {
+    auto *Load = dyn_cast<VPWidenLoadRecipe>(V);
+    if (!Load || Load->isReverse() != Reverse || !Addr.match(Load->getAddr()) ||
+        !Load->isMasked() || !Mask.match(Load->getMask()))
+      return false;
+    return true;
+  }
+};
+
+/// Match a non-reversed masked load.
+template <typename Addr_t, typename Mask_t>
+inline Load_match<Addr_t, Mask_t, false> m_Load(const Addr_t &Addr,
+                                                const Mask_t &Mask) {
+  return Load_match<Addr_t, Mask_t, false>(Addr, Mask);
+}
+
+/// Match a reversed masked load.
+template <typename Addr_t, typename Mask_t>
+inline Load_match<Addr_t, Mask_t, true> m_ReverseLoad(const Addr_t &Addr,
+                                                      const Mask_t &Mask) {
+  return Load_match<Addr_t, Mask_t, true>(Addr, Mask);
+}
+
+template <typename Addr_t, typename Val_t, typename Mask_t, bool Reverse>
+struct Store_match {
+  Addr_t Addr;
+  Val_t Val;
+  Mask_t Mask;
+
+  Store_match(Addr_t Addr, Val_t Val, Mask_t Mask)
+      : Addr(Addr), Val(Val), Mask(Mask) {}
+
+  template <typename OpTy> bool match(const OpTy *V) const {
+    auto *Store = dyn_cast<VPWidenStoreRecipe>(V);
+    if (!Store || Store->isReverse() != Reverse ||
----------------
lukel97 wrote:

My plan was that we would land this first and rebase https://github.com/llvm/llvm-project/pull/146525 on top of it.
https://github.com/llvm/llvm-project/pull/146525 would then remove m_ReverseLoad/m_ReverseStore. 

This way you wouldn't need to separately handle the reverse addresses here https://github.com/llvm/llvm-project/pull/146525/files#diff-53267225b83e943ceae51c326c9941e323fd7aaf74a08b5e6998d6456f88d1ddR2628-R2659

Instead you would only need to adjust the reverse pattern in optimizeMaskToEVL:

```c++
  if (match(&CurRecipe,
            m_Reverse(m_Load(m_VPValue(EndPtr), m_RemoveMask(HeaderMask, Mask)))) &&
      match(EndPtr, m_VecEndPtr(m_VPValue(Addr), m_Specific(&Plan->getVF())))) {
    auto *Load = new VPWidenLoadEVLRecipe(cast<VPWidenLoadRecipe>(CurRecipe),
                                          AdjustEndPtr(EndPtr), EVL, Mask);
    return Builder.createVPReverse(Load, EVL);
  }
```

https://github.com/llvm/llvm-project/pull/155394


More information about the llvm-commits mailing list