[llvm-branch-commits] [llvm] release/20.x: [ValueTracking] Fix bit width handling in computeKnownBits() for GEPs (#125532) (PR #126496)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Feb 10 01:56:02 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-llvm-analysis
Author: None (llvmbot)
Changes:
Backport 3dc1ef1650c8389a6f195a474781cf2281208bed 3bd11b502c1846afa5e1257c94b7a70566e34686
Requested by: @nikic
---
Full diff: https://github.com/llvm/llvm-project/pull/126496.diff
2 Files Affected:
- (modified) llvm/lib/Analysis/ValueTracking.cpp (+36-30)
- (modified) llvm/unittests/Analysis/ValueTrackingTest.cpp (+35)
``````````diff
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index b63a0a07f7de292..8a674914641a85c 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1445,7 +1445,22 @@ static void computeKnownBitsFromOperator(const Operator *I,
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
// Accumulate the constant indices in a separate variable
// to minimize the number of calls to computeForAddSub.
- APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
+ unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(I->getType());
+ APInt AccConstIndices(IndexWidth, 0);
+
+ auto AddIndexToKnown = [&](KnownBits IndexBits) {
+ if (IndexWidth == BitWidth) {
+ // Note that inbounds does *not* guarantee nsw for the addition, as only
+ // the offset is signed, while the base address is unsigned.
+ Known = KnownBits::add(Known, IndexBits);
+ } else {
+ // If the index width is smaller than the pointer width, only add the
+ // value to the low bits.
+ assert(IndexWidth < BitWidth &&
+ "Index width can't be larger than pointer width");
+ Known.insertBits(KnownBits::add(Known.trunc(IndexWidth), IndexBits), 0);
+ }
+ };
gep_type_iterator GTI = gep_type_begin(I);
for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
@@ -1483,43 +1498,34 @@ static void computeKnownBitsFromOperator(const Operator *I,
break;
}
- unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
- KnownBits IndexBits(IndexBitWidth);
- computeKnownBits(Index, IndexBits, Depth + 1, Q);
- TypeSize IndexTypeSize = GTI.getSequentialElementStride(Q.DL);
- uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
- KnownBits ScalingFactor(IndexBitWidth);
+ TypeSize Stride = GTI.getSequentialElementStride(Q.DL);
+ uint64_t StrideInBytes = Stride.getKnownMinValue();
+ if (!Stride.isScalable()) {
+ // Fast path for constant offset.
+ if (auto *CI = dyn_cast<ConstantInt>(Index)) {
+ AccConstIndices +=
+ CI->getValue().sextOrTrunc(IndexWidth) * StrideInBytes;
+ continue;
+ }
+ }
+
+ KnownBits IndexBits =
+ computeKnownBits(Index, Depth + 1, Q).sextOrTrunc(IndexWidth);
+ KnownBits ScalingFactor(IndexWidth);
// Multiply by current sizeof type.
// &A[i] == A + i * sizeof(*A[i]).
- if (IndexTypeSize.isScalable()) {
+ if (Stride.isScalable()) {
// For scalable types the only thing we know about sizeof is
// that this is a multiple of the minimum size.
- ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
- } else if (IndexBits.isConstant()) {
- APInt IndexConst = IndexBits.getConstant();
- APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
- IndexConst *= ScalingFactor;
- AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
- continue;
+ ScalingFactor.Zero.setLowBits(llvm::countr_zero(StrideInBytes));
} else {
ScalingFactor =
- KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
+ KnownBits::makeConstant(APInt(IndexWidth, StrideInBytes));
}
- IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
-
- // If the offsets have a different width from the pointer, according
- // to the language reference we need to sign-extend or truncate them
- // to the width of the pointer.
- IndexBits = IndexBits.sextOrTrunc(BitWidth);
-
- // Note that inbounds does *not* guarantee nsw for the addition, as only
- // the offset is signed, while the base address is unsigned.
- Known = KnownBits::add(Known, IndexBits);
- }
- if (!Known.isUnknown() && !AccConstIndices.isZero()) {
- KnownBits Index = KnownBits::makeConstant(AccConstIndices);
- Known = KnownBits::add(Known, Index);
+ AddIndexToKnown(KnownBits::mul(IndexBits, ScalingFactor));
}
+ if (!Known.isUnknown() && !AccConstIndices.isZero())
+ AddIndexToKnown(KnownBits::makeConstant(AccConstIndices));
break;
}
case Instruction::PHI: {
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index ee44aac45594d1b..50e5e0e6b2ff5b9 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2679,6 +2679,41 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAbsoluteSymbol) {
EXPECT_EQ(0u, Known_0_256_Align8.countMinTrailingOnes());
}
+TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPExtendBeforeMul) {
+ // The index should be extended before multiplying with the scale.
+ parseAssembly(R"(
+ target datalayout = "p:16:16:16"
+
+ define void @test(i16 %arg) {
+ %and = and i16 %arg, u0x8000
+ %base = inttoptr i16 %and to ptr
+ %A = getelementptr i32, ptr %base, i8 80
+ ret void
+ }
+ )");
+ KnownBits Known = computeKnownBits(A, M->getDataLayout());
+ EXPECT_EQ(~320 & 0x7fff, Known.Zero);
+ EXPECT_EQ(320, Known.One);
+}
+
+TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPOnlyIndexBits) {
+ // GEP should only affect the index width.
+ parseAssembly(R"(
+ target datalayout = "p:16:16:16:8"
+
+ define void @test(i16 %arg) {
+ %and = and i16 %arg, u0x8000
+ %or = or i16 %and, u0x00ff
+ %base = inttoptr i16 %or to ptr
+ %A = getelementptr i8, ptr %base, i8 1
+ ret void
+ }
+ )");
+ KnownBits Known = computeKnownBits(A, M->getDataLayout());
+ EXPECT_EQ(0x7fff, Known.Zero);
+ EXPECT_EQ(0, Known.One);
+}
+
TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {
{
// Check for an inverted mask: (X & ~M) op (Y & M).
``````````
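For reference (not part of the patch), here is a minimal standalone sketch of the arithmetic the two new tests exercise, using the same KnownBits/APInt entry points that the patch itself calls (KnownBits::add, trunc, insertBits, makeConstant); the concrete widths and values mirror the tests above.

```cpp
// Sketch only: replays the two test scenarios with plain KnownBits/APInt.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
using namespace llvm;

int main() {
  // Test 1 (datalayout "p:16:16:16"): the i8 index 80 must be sign-extended
  // to the 16-bit index width *before* scaling by sizeof(i32) == 4.
  // Extending first yields 320; scaling in 8 bits first would wrap to 64.
  unsigned IndexWidth = 16;
  APInt Offset = APInt(8, 80).sextOrTrunc(IndexWidth) * (uint64_t)4;
  assert(Offset == 320);

  // Test 2 (datalayout "p:16:16:16:8"): the index width (8) is narrower than
  // the pointer width (16), so the offset is added only into the low 8 bits,
  // as the new AddIndexToKnown lambda does in that case.
  unsigned BitWidth = 16;
  IndexWidth = 8;
  KnownBits Known(BitWidth);
  Known.One = APInt(BitWidth, 0x00ff);  // base: bit 15 unknown, low byte ones
  Known.Zero = APInt(BitWidth, 0x7f00); // bits 8..14 known zero
  KnownBits IndexBits = KnownBits::makeConstant(APInt(IndexWidth, 1));
  // 0xff + 1 wraps to 0x00 within the 8 index bits; the carry must not leak
  // into the non-index bits of the pointer.
  Known.insertBits(KnownBits::add(Known.trunc(IndexWidth), IndexBits), 0);
  assert(Known.Zero == 0x7fff && Known.One == 0);
  return 0;
}
```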
https://github.com/llvm/llvm-project/pull/126496
More information about the llvm-branch-commits mailing list