[llvm] [LoopVectorize] Generate wide active lane masks (PR #147535)

Kerry McLaughlin via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 30 06:35:30 PDT 2025


================
@@ -1432,20 +1433,93 @@ static bool isConditionTrueViaVFAndUF(VPValue *Cond, VPlan &Plan,
   return SE.isKnownPredicate(CmpInst::ICMP_EQ, TripCount, C);
 }
 
+static void extractFromWideActiveLaneMask(VPlan &Plan, ElementCount VF,
+                                          unsigned UF) {
+  VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
+  auto *Header = cast<VPBasicBlock>(VectorRegion->getEntry());
+  VPBasicBlock *ExitingVPBB = VectorRegion->getExitingBasicBlock();
+  auto *Term = &ExitingVPBB->back();
+
+  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
+  LLVMContext &Ctx = CanonicalIV->getScalarType()->getContext();
+  using namespace llvm::VPlanPatternMatch;
+
+  auto extractFromALM = [&](VPInstruction *ALM, VPInstruction *InsBefore,
+                            SmallVectorImpl<VPValue *> &Extracts) {
+    VPBuilder Builder(InsBefore);
+    DebugLoc DL = ALM->getDebugLoc();
+    for (unsigned Part = 0; Part < UF; ++Part) {
+      SmallVector<VPValue *> Ops;
+      Ops.append({ALM, Plan.getOrAddLiveIn(
+                           ConstantInt::get(IntegerType::getInt64Ty(Ctx),
+                                            VF.getKnownMinValue() * Part))});
+      Extracts.push_back(
+          Builder.createNaryOp(VPInstruction::ExtractSubvector, Ops, DL));
+    }
+  };
+
+  // Create a list of each active lane mask phi, ordered by unroll part.
+  SmallVector<VPActiveLaneMaskPHIRecipe *> Phis(UF, nullptr);
+  for (VPRecipeBase &R : Header->phis())
+    if (auto *Phi = dyn_cast<VPActiveLaneMaskPHIRecipe>(&R))
+      Phis[Phi->getUnrollPart()] = Phi;
+
+  assert(all_of(Phis, [](VPActiveLaneMaskPHIRecipe *Phi) { return Phi; }) &&
+         "Expected one VPActiveLaneMaskPHIRecipe for each unroll part");
+
+  // When using wide lane masks, the return type of the get.active.lane.mask
+  // intrinsic is VF x UF (second operand).
+  VPValue *ALMMultiplier =
+      Plan.getOrAddLiveIn(ConstantInt::get(IntegerType::getInt64Ty(Ctx), UF));
+  cast<VPInstruction>(Phis[0]->getStartValue())->setOperand(2, ALMMultiplier);
+  cast<VPInstruction>(Phis[0]->getBackedgeValue())
+      ->setOperand(2, ALMMultiplier);
+
+  // Create UF x extract vectors and insert into preheader.
+  SmallVector<VPValue *> EntryExtracts;
+  auto *EntryALM = cast<VPInstruction>(Phis[0]->getStartValue());
+  extractFromALM(EntryALM, cast<VPInstruction>(&EntryALM->getParent()->back()),
+                 EntryExtracts);
+
+  // Create UF x extract vectors and insert before the loop compare & branch,
+  // updating the compare to use the first extract.
+  SmallVector<VPValue *> LoopExtracts;
+  auto *LoopALM = cast<VPInstruction>(Phis[0]->getBackedgeValue());
+  VPInstruction *Not = cast<VPInstruction>(Term->getOperand(0));
+  extractFromALM(LoopALM, Not, LoopExtracts);
+  Not->setOperand(0, LoopExtracts[0]);
+
+  // Update the incoming values of active lane mask phis.
+  for (unsigned Part = 0; Part < UF; ++Part) {
+    Phis[Part]->setStartValue(EntryExtracts[Part]);
+    Phis[Part]->setBackedgeValue(LoopExtracts[Part]);
+  }
+
+  return;
+}
+
 /// Try to simplify the branch condition of \p Plan. This may restrict the
 /// resulting plan to \p BestVF and \p BestUF.
-static bool simplifyBranchConditionForVFAndUF(VPlan &Plan, ElementCount BestVF,
-                                              unsigned BestUF,
-                                              PredicatedScalarEvolution &PSE) {
+static bool
+simplifyBranchConditionForVFAndUF(VPlan &Plan, ElementCount BestVF,
+                                  unsigned BestUF,
+                                  PredicatedScalarEvolution &PSE,
+                                  bool DataAndControlFlowWithoutRuntimeCheck) {
----------------
kmclaughlin-arm wrote:

There's nothing preventing other active lane masks from being widened similarly if the flag is set. I restricted the transform like this because if either `DataAndControlFlowWithoutRuntimeCheck` is true or `IVUpdateMayOverflow` is false, then we know that splitting the wider mask later could be done without worrying about overflow.

However, since there is nothing yet which checks whether the start value may overflow during codegen of the `get_active_lane_mask` node, I don't think checking this here adds any value. The decision on whether we should generate the wider mask may need to take the tail folding style into account in the future, but the transform itself doesn't require it, so I have removed it from this patch.

https://github.com/llvm/llvm-project/pull/147535


More information about the llvm-commits mailing list