[llvm] [AMDGPU] prevent shrinking udiv/urem if either operand is in (SignedMax,UnsignedMax] (PR #116733)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 20 18:03:59 PST 2024


================
@@ -1193,19 +1193,35 @@ int AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
                                             Value *Den, unsigned AtLeast,
                                             bool IsSigned) const {
   const DataLayout &DL = Mod->getDataLayout();
-  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
-  if (LHSSignBits < AtLeast)
-    return -1;
-
-  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
-  if (RHSSignBits < AtLeast)
-    return -1;
-
-  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
-  unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
-  if (IsSigned)
-    ++DivBits;
-  return DivBits;
+  if (IsSigned) {
+    unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
+    if (LHSSignBits < AtLeast)
+      return -1;
+
+    unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
+    if (RHSSignBits < AtLeast)
+      return -1;
+
+    unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
+    unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
+    return DivBits + 1;
+  } else {
+    KnownBits Known = computeKnownBits(Num, DL, 0, AC, &I);
+    // If Num or Den may lie in the range (SignedMax, UnsignedMax] (i.e. the
+    // sign bit is not known to be zero), all bits are needed for the division.
+    if (Known.isNegative() || !Known.isNonNegative())
+      return -1;
----------------
arsenm wrote:

Is this reproducing the logic of computeKnownBits for division? Can you just do KnownLHS.udiv/urem(KnownRHS)?
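
For reference, a rough sketch of that direction (this is not from the patch). It assumes the existing static transfer functions KnownBits::udiv and KnownBits::urem from llvm/Support/KnownBits.h; IsDiv is a hypothetical flag distinguishing udiv from urem:

    KnownBits KnownNum = computeKnownBits(Num, DL, 0, AC, &I);
    KnownBits KnownDen = computeKnownBits(Den, DL, 0, AC, &I);
    // Reuse the generic division transfer functions rather than
    // reimplementing their logic here.
    KnownBits KnownRes = IsDiv ? KnownBits::udiv(KnownNum, KnownDen)
                               : KnownBits::urem(KnownNum, KnownDen);
    // The shrunken operation must still hold both operands, so bound the
    // width by the high bits known to be zero in Num and Den.
    unsigned LeadZ = std::min(KnownNum.countMinLeadingZeros(),
                              KnownDen.countMinLeadingZeros());
    unsigned DivBits = KnownNum.getBitWidth() - LeadZ;

Note that a narrow KnownRes alone would not justify shrinking: the truncated operands must still produce the same quotient/remainder, which is why the sketch bounds the width by the operands' known-zero bits rather than the result's.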

https://github.com/llvm/llvm-project/pull/116733

