[llvm] [ValueTracking] Fix bit width handling in computeKnownBits() for GEPs (PR #125532)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 4 00:59:20 PST 2025
https://github.com/nikic updated https://github.com/llvm/llvm-project/pull/125532
From e6240c5cdbd9a97606bee18515638f11dbbd9353 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Mon, 3 Feb 2025 15:33:45 +0100
Subject: [PATCH 1/2] [ValueTracking] Fix bit width handling in
computeKnownBits() for GEPs
For GEPs, we have three bit widths involved: the pointer bit width,
the index bit width, and the bit width of the GEP operands.
The correct behavior here is:
* We need to sextOrTrunc the GEP operand to the index width *before*
multiplying by the scale.
* If the index width and pointer width differ, GEP only ever
modifies the low bits. Adds should not overflow into the high
bits.
I'm testing this via unit tests because it's a bit tricky to test
in IR with InstCombine canonicalization getting in the way.
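To make the intended order of operations concrete, here is a minimal
sketch (illustrative only, not part of the patch; the helper name and
concrete widths are made up) of the offset computation for a single
array index:

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    // Addr has the pointer width; IndexWidth is at most that width.
    APInt gepAddIndex(APInt Addr, const APInt &Idx,
                      uint64_t StrideInBytes, unsigned IndexWidth) {
      // Adjust the operand to the index width first, *then* scale.
      APInt Scaled =
          Idx.sextOrTrunc(IndexWidth) * APInt(IndexWidth, StrideInBytes);
      if (IndexWidth == Addr.getBitWidth())
        return Addr + Scaled;
      // A narrower index type only ever modifies the low bits: the
      // add must not carry into the high bits of the pointer.
      APInt Low = Addr.trunc(IndexWidth) + Scaled;
      Addr.insertBits(Low, 0);
      return Addr;
    }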
---
llvm/lib/Analysis/ValueTracking.cpp | 66 ++++++++++---------
llvm/unittests/Analysis/ValueTrackingTest.cpp | 12 ++--
2 files changed, 42 insertions(+), 36 deletions(-)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 6b61a3546e8b7c5..61a14f471e88fff 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1426,7 +1426,22 @@ static void computeKnownBitsFromOperator(const Operator *I,
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
// Accumulate the constant indices in a separate variable
// to minimize the number of calls to computeForAddSub.
- APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
+ unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(I->getType());
+ APInt AccConstIndices(IndexWidth, 0);
+
+ auto AddIndex = [&](KnownBits IndexBits) {
+ if (IndexWidth == BitWidth) {
+ // Note that inbounds does *not* guarantee nsw for the addition, as only
+ // the offset is signed, while the base address is unsigned.
+ Known = KnownBits::add(Known, IndexBits);
+ } else {
+ // If the index width is smaller than the pointer width, only add the
+ // value to the low bits.
+ assert(IndexWidth < BitWidth &&
+ "Index width can't be larger than pointer width");
+ Known.insertBits(KnownBits::add(Known.trunc(IndexWidth), IndexBits), 0);
+ }
+ };
gep_type_iterator GTI = gep_type_begin(I);
for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
@@ -1464,43 +1479,34 @@ static void computeKnownBitsFromOperator(const Operator *I,
break;
}
- unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
- KnownBits IndexBits(IndexBitWidth);
- computeKnownBits(Index, IndexBits, Depth + 1, Q);
- TypeSize IndexTypeSize = GTI.getSequentialElementStride(Q.DL);
- uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
- KnownBits ScalingFactor(IndexBitWidth);
+ TypeSize Stride = GTI.getSequentialElementStride(Q.DL);
+ uint64_t StrideInBytes = Stride.getKnownMinValue();
+ if (!Stride.isScalable()) {
+ // Fast path for constant offset.
+ if (auto *CI = dyn_cast<ConstantInt>(Index)) {
+ AccConstIndices +=
+ CI->getValue().sextOrTrunc(IndexWidth) * StrideInBytes;
+ continue;
+ }
+ }
+
+ KnownBits IndexBits =
+ computeKnownBits(Index, Depth + 1, Q).sextOrTrunc(IndexWidth);
+ KnownBits ScalingFactor(IndexWidth);
// Multiply by current sizeof type.
// &A[i] == A + i * sizeof(*A[i]).
- if (IndexTypeSize.isScalable()) {
+ if (Stride.isScalable()) {
// For scalable types the only thing we know about sizeof is
// that this is a multiple of the minimum size.
- ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
- } else if (IndexBits.isConstant()) {
- APInt IndexConst = IndexBits.getConstant();
- APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
- IndexConst *= ScalingFactor;
- AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
- continue;
+ ScalingFactor.Zero.setLowBits(llvm::countr_zero(StrideInBytes));
} else {
ScalingFactor =
- KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
+ KnownBits::makeConstant(APInt(IndexWidth, StrideInBytes));
}
- IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
-
- // If the offsets have a different width from the pointer, according
- // to the language reference we need to sign-extend or truncate them
- // to the width of the pointer.
- IndexBits = IndexBits.sextOrTrunc(BitWidth);
-
- // Note that inbounds does *not* guarantee nsw for the addition, as only
- // the offset is signed, while the base address is unsigned.
- Known = KnownBits::add(Known, IndexBits);
- }
- if (!Known.isUnknown() && !AccConstIndices.isZero()) {
- KnownBits Index = KnownBits::makeConstant(AccConstIndices);
- Known = KnownBits::add(Known, Index);
+ AddIndex(KnownBits::mul(IndexBits, ScalingFactor));
}
+ if (!Known.isUnknown() && !AccConstIndices.isZero())
+ AddIndex(KnownBits::makeConstant(AccConstIndices));
break;
}
case Instruction::PHI: {
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 39865fa195cf757..50e5e0e6b2ff5b9 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2680,7 +2680,7 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsAbsoluteSymbol) {
}
TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPExtendBeforeMul) {
- // FIXME: The index should be extended before multiplying with the scale.
+ // The index should be extended before multiplying with the scale.
parseAssembly(R"(
target datalayout = "p:16:16:16"
@@ -2692,12 +2692,12 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPExtendBeforeMul) {
}
)");
KnownBits Known = computeKnownBits(A, M->getDataLayout());
- EXPECT_EQ(~64 & 0x7fff, Known.Zero);
- EXPECT_EQ(64, Known.One);
+ EXPECT_EQ(~320 & 0x7fff, Known.Zero);
+ EXPECT_EQ(320, Known.One);
}
TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPOnlyIndexBits) {
- // FIXME: GEP should only affect the index width.
+ // GEP should only affect the index width.
parseAssembly(R"(
target datalayout = "p:16:16:16:8"
@@ -2710,8 +2710,8 @@ TEST_F(ComputeKnownBitsTest, ComputeKnownBitsGEPOnlyIndexBits) {
}
)");
KnownBits Known = computeKnownBits(A, M->getDataLayout());
- EXPECT_EQ(0x7eff, Known.Zero);
- EXPECT_EQ(0x100, Known.One);
+ EXPECT_EQ(0x7fff, Known.Zero);
+ EXPECT_EQ(0, Known.One);
}
TEST_F(ValueTrackingTest, HaveNoCommonBitsSet) {
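For illustration, the new expected values follow directly from the
extend-before-multiply order. The hunks elide the IR bodies, but the
64 -> 320 change in the first test is consistent with, e.g., an i8
index of 80 scaled by a 4-byte stride on a 16-bit pointer
(hypothetical numbers):

    // Old (wrong) order: scale in the narrow type, then extend.
    // 80 * 4 wraps to 64 in i8, and sign extension keeps 64.
    APInt Wrong = (APInt(8, 80) * APInt(8, 4)).sext(16);  // 64
    // New (correct) order: extend to the index width, then scale.
    APInt Right = APInt(8, 80).sext(16) * APInt(16, 4);   // 320

In the second test the index type is only 8 bits wide, so the offset
arithmetic can no longer carry above bit 7: the previously inferred
known-one bit 8 (0x100) disappears, while the base pointer's unknown
high bit is unchanged.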
From 6088dce603fbca8793c584352f17b31092ed064d Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Tue, 4 Feb 2025 09:56:48 +0100
Subject: [PATCH 2/2] AddIndex -> AddIndexToKnown
---
llvm/lib/Analysis/ValueTracking.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 61a14f471e88fff..55feb15dfb15277 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1429,7 +1429,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(I->getType());
APInt AccConstIndices(IndexWidth, 0);
- auto AddIndex = [&](KnownBits IndexBits) {
+ auto AddIndexToKnown = [&](KnownBits IndexBits) {
if (IndexWidth == BitWidth) {
// Note that inbounds does *not* guarantee nsw for the addition, as only
// the offset is signed, while the base address is unsigned.
@@ -1503,10 +1503,10 @@ static void computeKnownBitsFromOperator(const Operator *I,
ScalingFactor =
KnownBits::makeConstant(APInt(IndexWidth, StrideInBytes));
}
- AddIndex(KnownBits::mul(IndexBits, ScalingFactor));
+ AddIndexToKnown(KnownBits::mul(IndexBits, ScalingFactor));
}
if (!Known.isUnknown() && !AccConstIndices.isZero())
- AddIndex(KnownBits::makeConstant(AccConstIndices));
+ AddIndexToKnown(KnownBits::makeConstant(AccConstIndices));
break;
}
case Instruction::PHI: {
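As a usage sketch of the narrow-index path in the renamed helper
(hypothetical constant values: a 16-bit pointer with an 8-bit index
type):

    #include "llvm/Support/KnownBits.h"
    using namespace llvm;

    KnownBits Known = KnownBits::makeConstant(APInt(16, 0x12C0));
    KnownBits Index = KnownBits::makeConstant(APInt(8, 0x80));
    Known.insertBits(KnownBits::add(Known.trunc(8), Index), 0);
    // Known is now the constant 0x1240: the low-byte add wrapped
    // without carrying into the high byte (a full-width add would
    // have produced 0x1340).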