[llvm] r278585 - Constify ValueTracking. NFC.
Pete Cooper via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 12 18:05:33 PDT 2016
Author: pete
Date: Fri Aug 12 20:05:32 2016
New Revision: 278585
URL: http://llvm.org/viewvc/llvm-project?rev=278585&view=rev
Log:
Constify ValueTracking. NFC.
Almost all of the methods here are only analysing Values, as opposed to
mutating them. Mark all of the easy ones as const.
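As a rough illustration of the idiom this relies on (see the new
matchSelectPattern overload in the header diff below): a const overload can
forward to the existing non-const entry point through const_cast, which is
safe precisely because the analysis never writes through the pointer. The
sketch below is not part of the patch; the names (pickLarger, Value::Data)
are made up for the example:

  // Hypothetical standalone sketch of the const_cast-forwarding idiom;
  // not LLVM code. pickLarger stands in for an analysis entry point.
  #include <cassert>

  struct Value { int Data; };

  // Existing non-const API.
  Value *pickLarger(Value *A, Value *B) {
    return A->Data >= B->Data ? A : B;
  }

  // Const overload: the function only reads its arguments, so forwarding
  // via const_cast is safe and the implementation is not duplicated.
  inline const Value *pickLarger(const Value *A, const Value *B) {
    return pickLarger(const_cast<Value *>(A), const_cast<Value *>(B));
  }

  int main() {
    Value X{1}, Y{2};
    const Value *CX = &X, *CY = &Y;
    assert(pickLarger(CX, CY) == CY); // callers holding const pointers now compile
    return 0;
  }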
Modified:
llvm/trunk/include/llvm/Analysis/ValueTracking.h
llvm/trunk/lib/Analysis/ValueTracking.cpp
Modified: llvm/trunk/include/llvm/Analysis/ValueTracking.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/ValueTracking.h?rev=278585&r1=278584&r2=278585&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/ValueTracking.h (original)
+++ llvm/trunk/include/llvm/Analysis/ValueTracking.h Fri Aug 12 20:05:32 2016
@@ -49,7 +49,7 @@ template <typename T> class ArrayRef;
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
- void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
@@ -60,14 +60,15 @@ template <typename T> class ArrayRef;
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
APInt &KnownZero, APInt &KnownOne);
/// Return true if LHS and RHS have no common bits set.
- bool haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
+ bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Determine whether the sign bit is known to be zero or one. Convenience
/// wrapper around computeKnownBits.
- void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
+ void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
@@ -78,7 +79,7 @@ template <typename T> class ArrayRef;
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
- bool isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL,
+ bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
bool OrZero = false, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
@@ -88,34 +89,35 @@ template <typename T> class ArrayRef;
/// vectors, return true if every element is known to be non-zero when
/// defined. Supports values with integer or pointer type and vectors of
/// integers.
- bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Returns true if the given value is known to be non-negative.
- bool isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ bool isKnownNonNegative(const Value *V, const DataLayout &DL,
+ unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
- bool isKnownPositive(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
- bool isKnownNegative(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
- bool isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
+ bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -129,7 +131,8 @@ template <typename T> class ArrayRef;
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
- bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
+ bool MaskedValueIsZero(const Value *V, const APInt &Mask,
+ const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -141,7 +144,7 @@ template <typename T> class ArrayRef;
/// equal to each other, so we return 3. For vectors, return the number of
sign bits for the vector element with the minimum number of known sign
/// bits.
- unsigned ComputeNumSignBits(Value *Op, const DataLayout &DL,
+ unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -213,7 +216,7 @@ template <typename T> class ArrayRef;
/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
- uint64_t GetStringLength(Value *V);
+ uint64_t GetStringLength(const Value *V);
/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
@@ -320,23 +323,25 @@ template <typename T> class ArrayRef;
const DominatorTree *DT = nullptr);
enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
- OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+ OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
+ const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
- OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
+ OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
+ const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
- OverflowResult computeOverflowForSignedAdd(Value *LHS, Value *RHS,
+ OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
/// This version also leverages the sign bit of Add if known.
- OverflowResult computeOverflowForSignedAdd(AddOperator *Add,
+ OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
@@ -345,7 +350,8 @@ template <typename T> class ArrayRef;
/// Returns true if the arithmetic part of the \p II 's result is
/// used only along the paths control dependent on the computation
/// not overflowing, \p II being an <op>.with.overflow intrinsic.
- bool isOverflowIntrinsicNoWrap(IntrinsicInst *II, DominatorTree &DT);
+ bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
+ const DominatorTree &DT);
/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
@@ -445,11 +451,21 @@ template <typename T> class ArrayRef;
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
Instruction::CastOps *CastOp = nullptr);
+ static inline SelectPatternResult
+ matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS,
+ Instruction::CastOps *CastOp = nullptr) {
+ Value *L = const_cast<Value*>(LHS);
+ Value *R = const_cast<Value*>(RHS);
+ auto Result = matchSelectPattern(const_cast<Value*>(V), L, R);
+ LHS = L;
+ RHS = R;
+ return Result;
+ }
/// Parse out a conservative ConstantRange from !range metadata.
///
/// E.g. if RangeMD is !{i32 0, i32 10, i32 15, i32 20} then return [0, 20).
- ConstantRange getConstantRangeFromMetadata(MDNode &RangeMD);
+ ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD);
/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return None if no
@@ -461,10 +477,13 @@ template <typename T> class ArrayRef;
/// T | T | F
/// F | T | T
/// (A)
- Optional<bool> isImpliedCondition(
- Value *LHS, Value *RHS, const DataLayout &DL, bool InvertAPred = false,
- unsigned Depth = 0, AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr);
+ Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
+ bool InvertAPred = false,
+ unsigned Depth = 0,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
} // end namespace llvm
#endif
Modified: llvm/trunk/lib/Analysis/ValueTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ValueTracking.cpp?rev=278585&r1=278584&r2=278585&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ValueTracking.cpp (original)
+++ llvm/trunk/lib/Analysis/ValueTracking.cpp Fri Aug 12 20:05:32 2016
@@ -119,10 +119,10 @@ static const Instruction *safeCxtI(const
return nullptr;
}
-static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+static void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
unsigned Depth, const Query &Q);
-void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+void llvm::computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
@@ -130,7 +130,8 @@ void llvm::computeKnownBits(Value *V, AP
Query(DL, AC, safeCxtI(V, CxtI), DT));
}
-bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
+bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
+ const DataLayout &DL,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
assert(LHS->getType() == RHS->getType() &&
@@ -145,10 +146,10 @@ bool llvm::haveNoCommonBitsSet(Value *LH
return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}
-static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
+static void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
unsigned Depth, const Query &Q);
-void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
+void llvm::ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
@@ -156,10 +157,11 @@ void llvm::ComputeSignBit(Value *V, bool
Query(DL, AC, safeCxtI(V, CxtI), DT));
}
-static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
+static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
const Query &Q);
-bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
+bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
+ bool OrZero,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
@@ -167,15 +169,16 @@ bool llvm::isKnownToBeAPowerOfTwo(Value
Query(DL, AC, safeCxtI(V, CxtI), DT));
}
-static bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q);
+static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
-bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
+bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}
-bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
+bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
+ unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
bool NonNegative, Negative;
@@ -183,7 +186,7 @@ bool llvm::isKnownNonNegative(Value *V,
return NonNegative;
}
-bool llvm::isKnownPositive(Value *V, const DataLayout &DL, unsigned Depth,
+bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
if (auto *CI = dyn_cast<ConstantInt>(V))
@@ -195,7 +198,7 @@ bool llvm::isKnownPositive(Value *V, con
isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}
-bool llvm::isKnownNegative(Value *V, const DataLayout &DL, unsigned Depth,
+bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
bool NonNegative, Negative;
@@ -203,41 +206,45 @@ bool llvm::isKnownNegative(Value *V, con
return Negative;
}
-static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q);
+static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
-bool llvm::isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
- AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
+bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
+ const DataLayout &DL,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
return ::isKnownNonEqual(V1, V2, Query(DL, AC,
safeCxtI(V1, safeCxtI(V2, CxtI)),
DT));
}
-static bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
+static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
const Query &Q);
-bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
+bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
+ const DataLayout &DL,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI, const DominatorTree *DT) {
return ::MaskedValueIsZero(V, Mask, Depth,
Query(DL, AC, safeCxtI(V, CxtI), DT));
}
-static unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q);
+static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
+ const Query &Q);
-unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
+unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}
-static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
+static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
+ bool NSW,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2,
unsigned Depth, const Query &Q) {
if (!Add) {
- if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
+ if (const ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
// We know that the top bits of C-X are clear if X contains fewer bits
// than C (i.e. no wrap-around can happen). For example, 20-X is
// positive if we can prove that X is >= 0 and < 16.
@@ -311,7 +318,7 @@ static void computeKnownBitsAddSub(bool
}
}
-static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
+static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2,
unsigned Depth, const Query &Q) {
@@ -503,7 +510,7 @@ bool llvm::isValidAssumeForContext(const
return !isEphemeralValueOf(Inv, CxtI);
}
-static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
+static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
APInt &KnownOne, unsigned Depth,
const Query &Q) {
// Use of assumptions is context-sensitive. If we don't have a context, we
@@ -776,7 +783,7 @@ static void computeKnownBitsFromAssume(V
// operator's result respectively for that shift amount. The results from calling
// KZF and KOF are conservatively combined for all permitted shift amounts.
template <typename KZFunctor, typename KOFunctor>
-static void computeKnownBitsFromShiftOperator(Operator *I,
+static void computeKnownBitsFromShiftOperator(const Operator *I,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2,
unsigned Depth, const Query &Q, KZFunctor KZF, KOFunctor KOF) {
@@ -853,7 +860,7 @@ static void computeKnownBitsFromShiftOpe
}
}
-static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
+static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
APInt &KnownOne, unsigned Depth,
const Query &Q) {
unsigned BitWidth = KnownZero.getBitWidth();
@@ -941,7 +948,8 @@ static void computeKnownBitsFromOperator
computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
- Value *LHS, *RHS;
+ const Value *LHS;
+ const Value *RHS;
SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
if (SelectPatternResult::isMinOrMax(SPF)) {
computeKnownBits(RHS, KnownZero, KnownOne, Depth + 1, Q);
@@ -1188,7 +1196,7 @@ static void computeKnownBitsFromOperator
}
case Instruction::Alloca: {
- AllocaInst *AI = cast<AllocaInst>(I);
+ const AllocaInst *AI = cast<AllocaInst>(I);
unsigned Align = AI->getAlignment();
if (Align == 0)
Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
@@ -1245,7 +1253,7 @@ static void computeKnownBitsFromOperator
break;
}
case Instruction::PHI: {
- PHINode *P = cast<PHINode>(I);
+ const PHINode *P = cast<PHINode>(I);
// Handle the case of a simple two-predecessor recurrence PHI.
// There's a lot more that could theoretically be done here, but
// this is sufficient to catch some interesting cases.
@@ -1365,12 +1373,12 @@ static void computeKnownBitsFromOperator
// function.
if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
- if (Value *RV = CallSite(I).getReturnedArgOperand()) {
+ if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
computeKnownBits(RV, KnownZero2, KnownOne2, Depth + 1, Q);
KnownZero |= KnownZero2;
KnownOne |= KnownOne2;
}
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::bswap:
@@ -1409,7 +1417,7 @@ static void computeKnownBitsFromOperator
break;
case Instruction::ExtractValue:
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
- ExtractValueInst *EVI = cast<ExtractValueInst>(I);
+ const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
if (EVI->getNumIndices() != 1) break;
if (EVI->getIndices()[0] == 0) {
switch (II->getIntrinsicID()) {
@@ -1453,7 +1461,7 @@ static void computeKnownBitsFromOperator
/// where V is a vector, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
-void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
unsigned Depth, const Query &Q) {
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
@@ -1469,7 +1477,7 @@ void computeKnownBits(Value *V, APInt &K
KnownOne.getBitWidth() == BitWidth &&
"V, KnownOne and KnownZero should have same BitWidth");
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
// We know all of the bits for a constant!
KnownOne = CI->getValue();
KnownZero = ~KnownOne;
@@ -1483,7 +1491,7 @@ void computeKnownBits(Value *V, APInt &K
}
// Handle a constant vector by taking the intersection of the known bits of
// each element.
- if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
+ if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
// We know that CDS must be a vector of integers. Take the intersection of
// each element.
KnownZero.setAllBits(); KnownOne.setAllBits();
@@ -1496,7 +1504,7 @@ void computeKnownBits(Value *V, APInt &K
return;
}
- if (auto *CV = dyn_cast<ConstantVector>(V)) {
+ if (const auto *CV = dyn_cast<ConstantVector>(V)) {
// We know that CV must be a vector of integers. Take the intersection of
// each element.
KnownZero.setAllBits(); KnownOne.setAllBits();
@@ -1526,13 +1534,13 @@ void computeKnownBits(Value *V, APInt &K
// A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
// the bits of its aliasee.
- if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
if (!GA->isInterposable())
computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
return;
}
- if (Operator *I = dyn_cast<Operator>(V))
+ if (const Operator *I = dyn_cast<Operator>(V))
computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
// Aligned pointers have trailing zeros - refine KnownZero set
@@ -1553,7 +1561,7 @@ void computeKnownBits(Value *V, APInt &K
/// Determine whether the sign bit is known to be zero or one.
/// Convenience wrapper around computeKnownBits.
-void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
+void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
unsigned Depth, const Query &Q) {
unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
if (!BitWidth) {
@@ -1572,9 +1580,9 @@ void ComputeSignBit(Value *V, bool &Know
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
-bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
+bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
const Query &Q) {
- if (Constant *C = dyn_cast<Constant>(V)) {
+ if (const Constant *C = dyn_cast<Constant>(V)) {
if (C->isNullValue())
return OrZero;
@@ -1604,10 +1612,10 @@ bool isKnownToBeAPowerOfTwo(Value *V, bo
match(V, m_LShr(m_Value(X), m_Value()))))
return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
- if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
+ if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
- if (SelectInst *SI = dyn_cast<SelectInst>(V))
+ if (const SelectInst *SI = dyn_cast<SelectInst>(V))
return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
@@ -1625,7 +1633,7 @@ bool isKnownToBeAPowerOfTwo(Value *V, bo
// Adding a power-of-two or zero to the same power-of-two or zero yields
// either the original power-of-two, a larger power-of-two or zero.
if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
- OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
+ const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
if (match(X, m_And(m_Specific(Y), m_Value())) ||
match(X, m_And(m_Value(), m_Specific(Y))))
@@ -1671,7 +1679,7 @@ bool isKnownToBeAPowerOfTwo(Value *V, bo
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
-static bool isGEPKnownNonNull(GEPOperator *GEP, unsigned Depth,
+static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
const Query &Q) {
if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
return false;
@@ -1730,7 +1738,7 @@ static bool isGEPKnownNonNull(GEPOperato
/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value? 'RangeType' is
/// the type of the value described by the range.
-static bool rangeMetadataExcludesValue(MDNode* Ranges, const APInt& Value) {
+static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
const unsigned NumRanges = Ranges->getNumOperands() / 2;
assert(NumRanges >= 1);
for (unsigned i = 0; i < NumRanges; ++i) {
@@ -1749,7 +1757,7 @@ static bool rangeMetadataExcludesValue(M
/// For vectors return true if every element is known to be non-zero when
/// defined. Supports values with integer or pointer type and vectors of
/// integers.
-bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q) {
+bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
if (auto *C = dyn_cast<Constant>(V)) {
if (C->isNullValue())
return false;
@@ -1793,7 +1801,7 @@ bool isKnownNonZero(Value *V, unsigned D
if (V->getType()->isPointerTy()) {
if (isKnownNonNull(V))
return true;
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
if (isGEPKnownNonNull(GEP, Depth, Q))
return true;
}
@@ -1813,7 +1821,7 @@ bool isKnownNonZero(Value *V, unsigned D
// if the lowest bit is shifted off the end.
if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
// shl nuw can't remove any non-zero bits.
- OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
+ const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
if (BO->hasNoUnsignedWrap())
return isKnownNonZero(X, Depth, Q);
@@ -1827,7 +1835,7 @@ bool isKnownNonZero(Value *V, unsigned D
// defined if the sign bit is shifted off the end.
else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
// shr exact can only shift out zero bits.
- PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
+ const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
if (BO->isExact())
return isKnownNonZero(X, Depth, Q);
@@ -1898,7 +1906,7 @@ bool isKnownNonZero(Value *V, unsigned D
}
// X * Y.
else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
- OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
+ const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
// If X and Y are non-zero then so is X * Y as long as the multiplication
// does not overflow.
if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
@@ -1906,13 +1914,13 @@ bool isKnownNonZero(Value *V, unsigned D
return true;
}
// (C ? X : Y) != 0 if X != 0 and Y != 0.
- else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
+ else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
isKnownNonZero(SI->getFalseValue(), Depth, Q))
return true;
}
// PHI
- else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+ else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
// Try and detect a recurrence that monotonically increases from a
// starting value, as these are common as induction variables.
if (PN->getNumIncomingValues() == 2) {
@@ -1946,8 +1954,8 @@ bool isKnownNonZero(Value *V, unsigned D
}
/// Return true if V2 == V1 + X, where X is known non-zero.
-static bool isAddOfNonZero(Value *V1, Value *V2, const Query &Q) {
- BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
+static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
+ const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
if (!BO || BO->getOpcode() != Instruction::Add)
return false;
Value *Op = nullptr;
@@ -1961,7 +1969,7 @@ static bool isAddOfNonZero(Value *V1, Va
}
/// Return true if it is known that V1 != V2.
-static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q) {
+static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
if (V1->getType()->isVectorTy() || V1 == V2)
return false;
if (V1->getType() != V2->getType())
@@ -1997,7 +2005,7 @@ static bool isKnownNonEqual(Value *V1, V
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
-bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
+bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
const Query &Q) {
APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
@@ -2008,8 +2016,9 @@ bool MaskedValueIsZero(Value *V, const A
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
-static unsigned computeNumSignBitsVectorConstant(Value *V, unsigned TyBits) {
- auto *CV = dyn_cast<Constant>(V);
+static unsigned computeNumSignBitsVectorConstant(const Value *V,
+ unsigned TyBits) {
+ const auto *CV = dyn_cast<Constant>(V);
if (!CV || !CV->getType()->isVectorTy())
return 0;
@@ -2037,7 +2046,7 @@ static unsigned computeNumSignBitsVector
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
vector element with the minimum number of known sign bits.
-unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q) {
+unsigned ComputeNumSignBits(const Value *V, unsigned Depth, const Query &Q) {
unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
unsigned Tmp, Tmp2;
unsigned FirstAnswer = 1;
@@ -2048,7 +2057,7 @@ unsigned ComputeNumSignBits(Value *V, un
if (Depth == 6)
return 1; // Limit search depth.
- Operator *U = dyn_cast<Operator>(V);
+ const Operator *U = dyn_cast<Operator>(V);
switch (Operator::getOpcode(V)) {
default: break;
case Instruction::SExt:
@@ -2206,7 +2215,7 @@ unsigned ComputeNumSignBits(Value *V, un
return std::min(Tmp, Tmp2)-1;
case Instruction::PHI: {
- PHINode *PN = cast<PHINode>(U);
+ const PHINode *PN = cast<PHINode>(U);
unsigned NumIncomingValues = PN->getNumIncomingValues();
// Don't analyze large in-degree PHIs.
if (NumIncomingValues > 4) break;
@@ -2967,13 +2976,14 @@ bool llvm::getConstantStringInfo(const V
/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
-static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
+static uint64_t GetStringLengthH(const Value *V,
+ SmallPtrSetImpl<const PHINode*> &PHIs) {
// Look through noop bitcast instructions.
V = V->stripPointerCasts();
// If this is a PHI node, there are two cases: either we have already seen it
// or we haven't.
- if (PHINode *PN = dyn_cast<PHINode>(V)) {
+ if (const PHINode *PN = dyn_cast<PHINode>(V)) {
if (!PHIs.insert(PN).second)
return ~0ULL; // already in the set.
@@ -2995,7 +3005,7 @@ static uint64_t GetStringLengthH(Value *
}
// strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
- if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
+ if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
if (Len1 == 0) return 0;
uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
@@ -3016,10 +3026,10 @@ static uint64_t GetStringLengthH(Value *
/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
-uint64_t llvm::GetStringLength(Value *V) {
+uint64_t llvm::GetStringLength(const Value *V) {
if (!V->getType()->isPointerTy()) return 0;
- SmallPtrSet<PHINode*, 32> PHIs;
+ SmallPtrSet<const PHINode*, 32> PHIs;
uint64_t Len = GetStringLengthH(V, PHIs);
// If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
// an empty string as a length.
@@ -3028,7 +3038,8 @@ uint64_t llvm::GetStringLength(Value *V)
/// \brief \p PN defines a loop-variant pointer to an object. Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
-static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) {
+static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
+ const LoopInfo *LI) {
// Find the loop-defined value.
Loop *L = LI->getLoopFor(PN->getParent());
if (PN->getNumIncomingValues() != 2)
@@ -3353,7 +3364,8 @@ bool llvm::isKnownNonNullAt(const Value
return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
}
-OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
+ const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
@@ -3403,7 +3415,8 @@ OverflowResult llvm::computeOverflowForU
return OverflowResult::MayOverflow;
}
-OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
+OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
+ const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
@@ -3432,9 +3445,13 @@ OverflowResult llvm::computeOverflowForU
return OverflowResult::MayOverflow;
}
-static OverflowResult computeOverflowForSignedAdd(
- Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
- AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
+static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
+ const Value *RHS,
+ const AddOperator *Add,
+ const DataLayout &DL,
+ AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT) {
if (Add && Add->hasNoSignedWrap()) {
return OverflowResult::NeverOverflows;
}
@@ -3476,7 +3493,8 @@ static OverflowResult computeOverflowFor
return OverflowResult::MayOverflow;
}
-bool llvm::isOverflowIntrinsicNoWrap(IntrinsicInst *II, DominatorTree &DT) {
+bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
+ const DominatorTree &DT) {
#ifndef NDEBUG
auto IID = II->getIntrinsicID();
assert((IID == Intrinsic::sadd_with_overflow ||
@@ -3488,11 +3506,11 @@ bool llvm::isOverflowIntrinsicNoWrap(Int
"Not an overflow intrinsic!");
#endif
- SmallVector<BranchInst *, 2> GuardingBranches;
- SmallVector<ExtractValueInst *, 2> Results;
+ SmallVector<const BranchInst *, 2> GuardingBranches;
+ SmallVector<const ExtractValueInst *, 2> Results;
- for (User *U : II->users()) {
- if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
+ for (const User *U : II->users()) {
+ if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
if (EVI->getIndices()[0] == 0)
@@ -3500,8 +3518,8 @@ bool llvm::isOverflowIntrinsicNoWrap(Int
else {
assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
- for (auto *U : EVI->users())
- if (auto *B = dyn_cast<BranchInst>(U)) {
+ for (const auto *U : EVI->users())
+ if (const auto *B = dyn_cast<BranchInst>(U)) {
assert(B->isConditional() && "How else is it using an i1?");
GuardingBranches.push_back(B);
}
@@ -3513,13 +3531,13 @@ bool llvm::isOverflowIntrinsicNoWrap(Int
}
}
- auto AllUsesGuardedByBranch = [&](BranchInst *BI) {
+ auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
if (!NoWrapEdge.isSingleEdge())
return false;
// Check if all users of the add are provably no-wrap.
- for (auto *Result : Results) {
+ for (const auto *Result : Results) {
// If the extractvalue itself is not executed on overflow, then we don't
// need to check each use separately, since domination is transitive.
if (DT.dominates(NoWrapEdge, Result->getParent()))
@@ -3537,7 +3555,7 @@ bool llvm::isOverflowIntrinsicNoWrap(Int
}
-OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
+OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
@@ -3546,7 +3564,8 @@ OverflowResult llvm::computeOverflowForS
Add, DL, AC, CxtI, DT);
}
-OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
+OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
+ const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
@@ -3769,7 +3788,7 @@ bool llvm::isKnownNotFullPoison(const In
return false;
}
-static bool isKnownNonNaN(Value *V, FastMathFlags FMF) {
+static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
if (FMF.noNaNs())
return true;
@@ -3778,7 +3797,7 @@ static bool isKnownNonNaN(Value *V, Fast
return false;
}
-static bool isKnownNonZero(Value *V) {
+static bool isKnownNonZero(const Value *V) {
if (auto *C = dyn_cast<ConstantFP>(V))
return !C->isZero();
return false;
@@ -4013,7 +4032,7 @@ SelectPatternResult llvm::matchSelectPat
LHS, RHS);
}
-ConstantRange llvm::getConstantRangeFromMetadata(MDNode &Ranges) {
+ConstantRange llvm::getConstantRangeFromMetadata(const MDNode &Ranges) {
const unsigned NumRanges = Ranges.getNumOperands() / 2;
assert(NumRanges >= 1 && "Must have at least one range!");
assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs");
@@ -4036,7 +4055,8 @@ ConstantRange llvm::getConstantRangeFrom
}
/// Return true if "icmp Pred LHS RHS" is always true.
-static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
+static bool isTruePredicate(CmpInst::Predicate Pred,
+ const Value *LHS, const Value *RHS,
const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
@@ -4065,7 +4085,8 @@ static bool isTruePredicate(CmpInst::Pre
return true;
// Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
- auto MatchNUWAddsToSameValue = [&](Value *A, Value *B, Value *&X,
+ auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
+ const Value *&X,
const APInt *&CA, const APInt *&CB) {
if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
@@ -4085,7 +4106,7 @@ static bool isTruePredicate(CmpInst::Pre
return false;
};
- Value *X;
+ const Value *X;
const APInt *CLHS, *CRHS;
if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
return CLHS->ule(*CRHS);
@@ -4098,8 +4119,9 @@ static bool isTruePredicate(CmpInst::Pre
/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
-isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS, Value *ARHS,
- Value *BLHS, Value *BRHS, const DataLayout &DL,
+isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
+ const Value *ARHS, const Value *BLHS,
+ const Value *BRHS, const DataLayout &DL,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI, const DominatorTree *DT) {
switch (Pred) {
@@ -4126,7 +4148,8 @@ isImpliedCondOperands(CmpInst::Predicate
/// Return true if the operands of the two compares match. IsSwappedOps is true
/// when the operands match, but are swapped.
-static bool isMatchingOps(Value *ALHS, Value *ARHS, Value *BLHS, Value *BRHS,
+static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
+ const Value *BLHS, const Value *BRHS,
bool &IsSwappedOps) {
bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
@@ -4138,9 +4161,11 @@ static bool isMatchingOps(Value *ALHS, V
/// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
- Value *ALHS, Value *ARHS,
+ const Value *ALHS,
+ const Value *ARHS,
CmpInst::Predicate BPred,
- Value *BLHS, Value *BRHS,
+ const Value *BLHS,
+ const Value *BRHS,
bool IsSwappedOps) {
// Canonicalize the operands so they're matching.
if (IsSwappedOps) {
@@ -4159,9 +4184,10 @@ static Optional<bool> isImpliedCondMatch
/// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false. Otherwise, return None if we can't infer anything.
static Optional<bool>
-isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, Value *ALHS,
- ConstantInt *C1, CmpInst::Predicate BPred,
- Value *BLHS, ConstantInt *C2) {
+isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
+ const ConstantInt *C1,
+ CmpInst::Predicate BPred,
+ const Value *BLHS, const ConstantInt *C2) {
assert(ALHS == BLHS && "LHS operands must match.");
ConstantRange DomCR =
ConstantRange::makeExactICmpRegion(APred, C1->getValue());
@@ -4176,7 +4202,7 @@ isImpliedCondMatchingImmOperands(CmpInst
return None;
}
-Optional<bool> llvm::isImpliedCondition(Value *LHS, Value *RHS,
+Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
const DataLayout &DL, bool InvertAPred,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI,