[llvm] d430015 - Revert "[ValueTracking] Remove by-ref computeKnownBits() overloads (NFC)"
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 16 05:04:19 PDT 2023
Author: Nikita Popov
Date: 2023-10-16T14:04:09+02:00
New Revision: d4300154b6e7afff10e6b5f69c244c329ba829f3
URL: https://github.com/llvm/llvm-project/commit/d4300154b6e7afff10e6b5f69c244c329ba829f3
DIFF: https://github.com/llvm/llvm-project/commit/d4300154b6e7afff10e6b5f69c244c329ba829f3.diff
LOG: Revert "[ValueTracking] Remove by-ref computeKnownBits() overloads (NFC)"
This reverts commit b5743d4798b250506965e07ebab806a3c2d767cc.
This causes some minor compile-time impact. Revert for now; better
to do the change more gradually.
Added:
Modified:
llvm/include/llvm/Analysis/ValueTracking.h
llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
llvm/lib/Analysis/DemandedBits.cpp
llvm/lib/Analysis/ScalarEvolution.cpp
llvm/lib/Analysis/ValueTracking.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 191f81e0797c11b..25272e0581c9385 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -46,26 +46,43 @@ class Value;
constexpr unsigned MaxAnalysisRecursionDepth = 6;
-/// Determine which bits of V are known to be either zero or one.
+/// Determine which bits of V are known to be either zero or one and return
+/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
-KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
- unsigned Depth = 0, AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr,
- bool UseInstrInfo = true);
-
-/// Determine which bits of V are known to be either zero or one.
+void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
+ unsigned Depth = 0, AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
+
+/// Determine which bits of V are known to be either zero or one and return
+/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the demanded elements in the vector.
+void computeKnownBits(const Value *V, const APInt &DemandedElts,
+ KnownBits &Known, const DataLayout &DL,
+ unsigned Depth = 0, AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
+
+/// Returns the known bits rather than passing by reference.
+KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
+ unsigned Depth = 0, AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
+
+/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
diff --git a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
index 09a08d92c368dd9..dcfcc8f41dd58d0 100644
--- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
+++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -466,6 +466,11 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner {
/// methods should return the value returned by this function.
virtual Instruction *eraseInstFromFunction(Instruction &I) = 0;
+ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
+ const Instruction *CxtI) const {
+ llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
+ }
+
KnownBits computeKnownBits(const Value *V, unsigned Depth,
const Instruction *CxtI) const {
return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
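(Similarly, a hedged sketch of how the restored InstCombiner wrapper is used by the callers updated further down in this patch; the helper name isKnownAllZero and its arguments are invented for illustration and are not part of the commit:)

  // Hypothetical InstCombine-style helper: true if every bit of Op is known
  // to be zero at CxtI. Relies on the by-ref wrapper re-added above.
  static bool isKnownAllZero(InstCombiner &IC, Value *Op, Instruction *CxtI) {
    KnownBits Known(Op->getType()->getScalarSizeInBits()); // pre-size the result
    IC.computeKnownBits(Op, Known, /*Depth=*/0, CxtI);
    return Known.isZero();
  }
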
diff --git a/llvm/lib/Analysis/DemandedBits.cpp b/llvm/lib/Analysis/DemandedBits.cpp
index 2c41451b3aab229..c5017bf52498e44 100644
--- a/llvm/lib/Analysis/DemandedBits.cpp
+++ b/llvm/lib/Analysis/DemandedBits.cpp
@@ -70,10 +70,13 @@ void DemandedBits::determineLiveOperandBits(
KnownBitsComputed = true;
const DataLayout &DL = UserI->getModule()->getDataLayout();
- Known = computeKnownBits(V1, DL, 0, &AC, UserI, &DT);
+ Known = KnownBits(BitWidth);
+ computeKnownBits(V1, Known, DL, 0, &AC, UserI, &DT);
- if (V2)
- Known2 = computeKnownBits(V2, DL, 0, &AC, UserI, &DT);
+ if (V2) {
+ Known2 = KnownBits(BitWidth);
+ computeKnownBits(V2, Known2, DL, 0, &AC, UserI, &DT);
+ }
};
switch (UserI->getOpcode()) {
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index d542f82b83ca148..4850a6aa5625d42 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -7722,8 +7722,9 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
unsigned LZ = A.countl_zero();
unsigned TZ = A.countr_zero();
unsigned BitWidth = A.getBitWidth();
- KnownBits Known =
- computeKnownBits(BO->LHS, getDataLayout(), 0, &AC, nullptr, &DT);
+ KnownBits Known(BitWidth);
+ computeKnownBits(BO->LHS, Known, getDataLayout(),
+ 0, &AC, nullptr, &DT);
APInt EffectiveMask =
APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 18a2562ec2dce92..82310444326d6bb 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -159,6 +159,25 @@ static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
computeKnownBits(V, DemandedElts, Known, Depth, Q);
}
+void llvm::computeKnownBits(const Value *V, KnownBits &Known,
+ const DataLayout &DL, unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT, bool UseInstrInfo) {
+ ::computeKnownBits(
+ V, Known, Depth,
+ SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
+}
+
+void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
+ KnownBits &Known, const DataLayout &DL,
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI, const DominatorTree *DT,
+ bool UseInstrInfo) {
+ ::computeKnownBits(
+ V, DemandedElts, Known, Depth,
+ SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
+}
+
static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
unsigned Depth, const SimplifyQuery &Q);
@@ -231,9 +250,11 @@ bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
return true;
}
-
- KnownBits LHSKnown = ::computeKnownBits(LHS, 0, SQ);
- KnownBits RHSKnown = ::computeKnownBits(RHS, 0, SQ);
+ IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
+ KnownBits LHSKnown(IT->getBitWidth());
+ KnownBits RHSKnown(IT->getBitWidth());
+ ::computeKnownBits(LHS, LHSKnown, 0, SQ);
+ ::computeKnownBits(RHS, RHSKnown, 0, SQ);
return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}
@@ -8119,8 +8140,9 @@ static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
// If X & C == 0 then (X | C) == X +_{nuw} C
if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
- KnownBits Known = computeKnownBits(X, DL, Depth + 1, /*AC*/ nullptr,
- /*CxtI*/ nullptr, /*DT*/ nullptr);
+ KnownBits Known(CA->getBitWidth());
+ computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
+ /*CxtI*/ nullptr, /*DT*/ nullptr);
if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
return true;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9a37627e36b9ffa..e831316efff52ba 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -12147,7 +12147,9 @@ MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
const GlobalValue *GV = nullptr;
int64_t GVOffset = 0;
if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
- KnownBits Known = llvm::computeKnownBits(GV, getDataLayout());
+ unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
+ KnownBits Known(PtrWidth);
+ llvm::computeKnownBits(GV, Known, getDataLayout());
unsigned AlignBits = Known.countMinTrailingZeros();
if (AlignBits)
return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index 5e54a754a02f3f8..51ef72b873a516b 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -1270,7 +1270,8 @@ bool PolynomialMultiplyRecognize::highBitsAreZero(Value *V,
if (!T)
return false;
- KnownBits Known = computeKnownBits(V, DL);
+ KnownBits Known(T->getBitWidth());
+ computeKnownBits(V, Known, DL);
return Known.countMinLeadingZeros() >= IterCount;
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 88636ff60f5cd4d..e29fb869686ca0b 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -642,7 +642,8 @@ static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
}
- KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
+ KnownBits Known(BitWidth);
+ IC.computeKnownBits(Op0, Known, 0, &II);
// If all bits are zero except for exactly one fixed bit, then the result
// must be 0 or 1, and we can get that answer by shifting to LSB:
@@ -2874,7 +2875,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// If there is a dominating assume with the same condition as this one,
// then this one is redundant, and should be removed.
- KnownBits Known = computeKnownBits(IIOperand, 0, II);
+ KnownBits Known(1);
+ computeKnownBits(IIOperand, Known, 0, II);
if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
return eraseInstFromFunction(*II);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 8f15ff178a5800b..7a15c0dee492b5a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -3749,7 +3749,8 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
// The motivation for this call into value tracking is to take advantage of
// the assumption cache, so make sure that is populated.
if (!CondVal->getType()->isVectorTy() && !AC.assumptions().empty()) {
- KnownBits Known = computeKnownBits(CondVal, 0, &SI);
+ KnownBits Known(1);
+ computeKnownBits(CondVal, Known, 0, &SI);
if (Known.One.isOne())
return replaceInstUsesWith(SI, TrueVal);
if (Known.Zero.isOne())
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 308c462482bc8fb..be005e61a8d2d89 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -119,7 +119,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
"Value *V, DemandedMask and Known must have same BitWidth");
if (isa<Constant>(V)) {
- Known = computeKnownBits(V, Depth, CxtI);
+ computeKnownBits(V, Known, Depth, CxtI);
return nullptr;
}
@@ -132,7 +132,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
- Known = computeKnownBits(V, Depth, CxtI);
+ computeKnownBits(V, Known, Depth, CxtI);
return nullptr; // Only analyze instructions.
}
@@ -184,7 +184,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
switch (I->getOpcode()) {
default:
- Known = computeKnownBits(I, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
break;
case Instruction::And: {
// If either the LHS or the RHS are Zero, the result is zero.
@@ -598,7 +598,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return InsertNewInstWith(And1, I->getIterator());
}
- Known = computeKnownBits(I, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
break;
}
case Instruction::Shl: {
@@ -660,7 +660,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return I;
}
}
- Known = computeKnownBits(I, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
}
break;
}
@@ -712,7 +712,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (ShiftAmt)
Known.Zero.setHighBits(ShiftAmt); // high bits known zero.
} else {
- Known = computeKnownBits(I, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
}
break;
}
@@ -775,7 +775,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
Known.One |= HighBits;
}
} else {
- Known = computeKnownBits(I, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
}
break;
}
@@ -797,7 +797,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
Known = KnownBits::udiv(LHSKnown, KnownBits::makeConstant(*SA),
cast<BinaryOperator>(I)->isExact());
} else {
- Known = computeKnownBits(I, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
}
break;
}
@@ -837,7 +837,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
}
- Known = computeKnownBits(I, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
break;
}
case Instruction::URem: {
@@ -977,7 +977,7 @@ Value *InstCombinerImpl::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
if (!KnownBitsComputed)
- Known = computeKnownBits(V, Depth, CxtI);
+ computeKnownBits(V, Known, Depth, CxtI);
break;
}
}
@@ -1007,8 +1007,8 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
// this instruction has a simpler value in that context.
switch (I->getOpcode()) {
case Instruction::And: {
- RHSKnown = computeKnownBits(I->getOperand(1), Depth + 1, CxtI);
- LHSKnown = computeKnownBits(I->getOperand(0), Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
Known = LHSKnown & RHSKnown;
computeKnownBitsFromAssume(I, Known, Depth, SQ.getWithInstruction(CxtI));
@@ -1027,8 +1027,8 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
break;
}
case Instruction::Or: {
- RHSKnown = computeKnownBits(I->getOperand(1), Depth + 1, CxtI);
- LHSKnown = computeKnownBits(I->getOperand(0), Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
Known = LHSKnown | RHSKnown;
computeKnownBitsFromAssume(I, Known, Depth, SQ.getWithInstruction(CxtI));
@@ -1049,8 +1049,8 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
break;
}
case Instruction::Xor: {
- RHSKnown = computeKnownBits(I->getOperand(1), Depth + 1, CxtI);
- LHSKnown = computeKnownBits(I->getOperand(0), Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
Known = LHSKnown ^ RHSKnown;
computeKnownBitsFromAssume(I, Known, Depth, SQ.getWithInstruction(CxtI));
@@ -1075,11 +1075,11 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
// If an operand adds zeros to every bit below the highest demanded bit,
// that operand doesn't change the result. Return the other side.
- RHSKnown = computeKnownBits(I->getOperand(1), Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
return I->getOperand(0);
- LHSKnown = computeKnownBits(I->getOperand(0), Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
if (DemandedFromOps.isSubsetOf(LHSKnown.Zero))
return I->getOperand(1);
@@ -1094,19 +1094,19 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
// If an operand subtracts zeros from every bit below the highest demanded
// bit, that operand doesn't change the result. Return the other side.
- RHSKnown = computeKnownBits(I->getOperand(1), Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
return I->getOperand(0);
bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
- LHSKnown = computeKnownBits(I->getOperand(0), Depth + 1, CxtI);
+ computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
Known = KnownBits::computeForAddSub(/*Add*/ false, NSW, LHSKnown, RHSKnown);
computeKnownBitsFromAssume(I, Known, Depth, SQ.getWithInstruction(CxtI));
break;
}
case Instruction::AShr: {
// Compute the Known bits to simplify things downstream.
- Known = computeKnownBits(I, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
// If this user is only demanding bits that we know, return the known
// constant.
@@ -1133,7 +1133,7 @@ Value *InstCombinerImpl::SimplifyMultipleUseDemandedBits(
}
default:
// Compute the Known bits to simplify things downstream.
- Known = computeKnownBits(I, Depth, CxtI);
+ computeKnownBits(I, Known, Depth, CxtI);
// If this user is only demanding bits that we know, return the known
// constant.
diff --git a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
index b92df3012452688..73a50b793e6d2e7 100644
--- a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -234,7 +234,9 @@ ValueRange FastDivInsertionTask::getValueRange(Value *V,
unsigned HiBits = LongLen - ShortLen;
const DataLayout &DL = SlowDivOrRem->getModule()->getDataLayout();
- KnownBits Known = computeKnownBits(V, DL);
+ KnownBits Known(LongLen);
+
+ computeKnownBits(V, Known, DL);
if (Known.countMinLeadingZeros() >= HiBits)
return VALRNG_KNOWN_SHORT;
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index b97054be2fc98ec..73a80702671922b 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -1262,8 +1262,9 @@ std::optional<APInt> Vectorizer::getConstantOffsetComplexAddrs(
if (!Safe) {
// When computing known bits, use the GEPs as context instructions, since
// they likely are in the same BB as the load/store.
- KnownBits Known = computeKnownBits((IdxDiff.sge(0) ? ValA : OpB), DL, 0,
- &AC, ContextInst, &DT);
+ KnownBits Known(BitWidth);
+ computeKnownBits((IdxDiff.sge(0) ? ValA : OpB), Known, DL, 0, &AC,
+ ContextInst, &DT);
APInt BitsAllowedToBeSet = Known.Zero.zext(IdxDiff.getBitWidth());
if (Signed)
BitsAllowedToBeSet.clearBit(BitWidth - 1);