[llvm] [GlobalISel] Handle div-by-pow2 (PR #83155)

Shilei Tian via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 18 06:59:49 PDT 2024


================
@@ -5286,6 +5286,157 @@ MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
   return MIB.buildMul(Ty, Res, Factor);
 }
 
+bool CombinerHelper::matchDivByPow2(MachineInstr &MI, bool IsSigned) {
+  assert((MI.getOpcode() == TargetOpcode::G_SDIV ||
+          MI.getOpcode() == TargetOpcode::G_UDIV) &&
+         "Expected SDIV or UDIV");
+  auto &Div = cast<GenericMachineInstr>(MI);
+  Register RHS = Div.getReg(2);
+  auto MatchPow2 = [&](const Constant *C) {
+    auto *CI = dyn_cast<ConstantInt>(C);
+    if (!CI)
+      return false;
+    return CI->getValue().isPowerOf2() ||
+           (IsSigned && CI->getValue().isNegatedPowerOf2());
+  };
+  return matchUnaryPredicate(MRI, RHS, MatchPow2, /*AllowUndefs=*/false);
+}
+
+void CombinerHelper::applySDivByPow2(MachineInstr &MI) {
+  assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
+  auto &SDiv = cast<GenericMachineInstr>(MI);
+  Register Dst = SDiv.getReg(0);
+  Register LHS = SDiv.getReg(1);
+  Register RHS = SDiv.getReg(2);
+  LLT Ty = MRI.getType(Dst);
+  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
+
+  Builder.setInstrAndDebugLoc(MI);
+
+  // Effectively we want to lower G_SDIV %lhs, %rhs, where %rhs is a power of 2,
+  // to the following version:
+  //
+  // %bits = G_CONSTANT $bitwidth
+  // %c1 = G_CTTZ %rhs
+  // %c1 = G_ZEXT %c1
+  // %inexact = G_SUB %bits, %c1
+  // %tmp = G_CONSTANT ($bitwidth - 1)
+  // %sign = %G_ASHR %lhs, %tmp
+  // %srl = G_SHL %sign, %inexact
+  // %add = G_ADD %lhs, %srl
+  // %sra = G_ASHR %add, %c1
+  // %one = G_CONSTANT $1
+  // %allones = G_CONSTANT $111...11
+  // %isone = G_ICMP EQ %rhs, %one
+  // %isallones = G_ICMP EQ %rhs, %allones
+  // %isoneorallones = G_OR %isone, %isallones
+  // %sra = G_SELECT, %isoneorallones, %lhs, %sra
+  // %zero = G_CONSTANT $0
+  // %sub = G_SUB %zero, %sra
+  // %isneg = G_ICMP SLT %lhs, %zero
+  // %res = G_SELECT %isneg, %sub, %sra
+  //
+  // When %rhs is a constant integer, or a splat vector, we can check its value
+  // at compile time such that the first two G_ICMP conditional statements, as
+  // well as the corresponding non-taken branches, can be eliminated. This can
+  // generate compact code even w/o any constant folding afterwards. When $rhs
+  // is not a splat vector, we have to generate those checks via instructions.
+
+  unsigned Bitwidth = Ty.getScalarSizeInBits();
+  auto Zero = Builder.buildConstant(Ty, 0);
+
+  // TODO: It is not necessary to have this specialized version. We need it *for
+  // now* because the folding/combine can't handle it. Remove this large
+  // conditional statement once we can properly fold the two G_ICMP.
+  if (auto RHSC = getConstantOrConstantSplatVector(RHS)) {
+    // Special case: (sdiv X, 1) -> X
+    if (RHSC->isOne()) {
+      replaceSingleDefInstWithReg(MI, LHS);
+      return;
+    }
+    // Special Case: (sdiv X, -1) -> 0-X
+    if (RHSC->isAllOnes()) {
+      auto Sub = Builder.buildSub(Ty, Zero, LHS);
+      replaceSingleDefInstWithReg(MI, Sub->getOperand(0).getReg());
+      return;
+    }
+
+    unsigned TrailingZeros = RHSC->countTrailingZeros();
+    auto C1 = Builder.buildConstant(ShiftAmtTy, TrailingZeros);
+    auto Inexact = Builder.buildConstant(ShiftAmtTy, Bitwidth - TrailingZeros);
+    auto Sign = Builder.buildAShr(
+        Ty, LHS, Builder.buildConstant(ShiftAmtTy, Bitwidth - 1));
+    // Add (LHS < 0) ? abs2 - 1 : 0;
----------------
shiltian wrote:

Not sure. That code is taken from `llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp:4667`. My educated guess is that the add computes the two's complement adjustment without introducing a conditional branch.

https://github.com/llvm/llvm-project/pull/83155


More information about the llvm-commits mailing list