[clang] [llvm] [ValueTracking] Convert `isKnownNonZero` to use SimplifyQuery (PR #85863)
Yingwei Zheng via cfe-commits
cfe-commits at lists.llvm.org
Fri Apr 12 00:58:17 PDT 2024
https://github.com/dtcxzyw updated https://github.com/llvm/llvm-project/pull/85863
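For anyone skimming the series: patch 2 deletes the free-standing overload of `isKnownNonZero` that took the analyses (DataLayout, AssumptionCache, context instruction, DominatorTree) as individual parameters and converts every caller to the SimplifyQuery-based signature. A minimal sketch of the call-site migration, using illustrative variable names rather than any one caller from the diff:

    // Old overload (removed by patch 2); analyses passed one by one:
    //   bool isKnownNonZero(const Value *V, const DataLayout &DL,
    //                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
    //                       const Instruction *CxtI = nullptr,
    //                       const DominatorTree *DT = nullptr,
    //                       bool UseInstrInfo = true);
    bool OldStyle = isKnownNonZero(V, DL, /*Depth=*/0, &AC, CxtI, &DT);

    // New signature; the analyses travel in a SimplifyQuery, and Depth is
    // no longer defaulted, which is why callers now spell /*Depth=*/0:
    //   bool isKnownNonZero(const Value *V, unsigned Depth,
    //                       const SimplifyQuery &Q);
    bool NewStyle =
        isKnownNonZero(V, /*Depth=*/0, SimplifyQuery(DL, &DT, &AC, CxtI));

Callers that already hold a SimplifyQuery (InstSimplify, InstCombine) simply pass it through, e.g. `isKnownNonZero(B, /*Depth=*/0, Q)`.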
From 9b725ffdb93b3029263129063d021063783f9cd9 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Thu, 21 Mar 2024 21:10:46 +0800
Subject: [PATCH 1/4] [ValueTracking] Add pre-commit tests. NFC.
---
llvm/test/Transforms/InstCombine/icmp-dom.ll | 139 +++++++++++++++++++
1 file changed, 139 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/icmp-dom.ll b/llvm/test/Transforms/InstCombine/icmp-dom.ll
index f4b9022d14349b2..138254d912b259b 100644
--- a/llvm/test/Transforms/InstCombine/icmp-dom.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-dom.ll
@@ -403,3 +403,142 @@ truelabel:
falselabel:
ret i8 0
}
+
+define i1 @and_mask1_eq(i32 %conv) {
+; CHECK-LABEL: @and_mask1_eq(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[CONV:%.*]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: ret i1 false
+; CHECK: else:
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[CONV]], 3
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[AND1]], 0
+; CHECK-NEXT: ret i1 [[CMP1]]
+;
+entry:
+ %and = and i32 %conv, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %then, label %else
+
+then:
+ ret i1 0
+
+else:
+ %and1 = and i32 %conv, 3
+ %cmp1 = icmp eq i32 %and1, 0
+ ret i1 %cmp1
+}
+
+define i1 @and_mask1_ne(i32 %conv) {
+; CHECK-LABEL: @and_mask1_ne(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[CONV:%.*]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: ret i1 false
+; CHECK: else:
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[CONV]], 3
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[AND1]], 0
+; CHECK-NEXT: ret i1 [[CMP1]]
+;
+entry:
+ %and = and i32 %conv, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %then, label %else
+
+then:
+ ret i1 0
+
+else:
+ %and1 = and i32 %conv, 3
+ %cmp1 = icmp ne i32 %and1, 0
+ ret i1 %cmp1
+}
+
+define i1 @and_mask2(i32 %conv) {
+; CHECK-LABEL: @and_mask2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[CONV:%.*]], 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: ret i1 false
+; CHECK: else:
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[CONV]], 3
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[AND1]], 0
+; CHECK-NEXT: ret i1 [[CMP1]]
+;
+entry:
+ %and = and i32 %conv, 4
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %then, label %else
+
+then:
+ ret i1 0
+
+else:
+ %and1 = and i32 %conv, 3
+ %cmp1 = icmp eq i32 %and1, 0
+ ret i1 %cmp1
+}
+
+; TODO: %cmp1 can be folded into false.
+
+define i1 @and_mask3(i32 %conv) {
+; CHECK-LABEL: @and_mask3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[CONV:%.*]], 3
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: ret i1 false
+; CHECK: else:
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[CONV]], 7
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[AND1]], 0
+; CHECK-NEXT: ret i1 [[CMP1]]
+;
+entry:
+ %and = and i32 %conv, 3
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %then, label %else
+
+then:
+ ret i1 0
+
+else:
+ %and1 = and i32 %conv, 7
+ %cmp1 = icmp eq i32 %and1, 0
+ ret i1 %cmp1
+}
+
+; TODO: %cmp1 can be folded into false.
+
+define i1 @and_mask4(i32 %conv) {
+; CHECK-LABEL: @and_mask4(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[CONV:%.*]], 4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: ret i1 false
+; CHECK: else:
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[CONV]], 7
+; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[AND1]], 0
+; CHECK-NEXT: ret i1 [[CMP1]]
+;
+entry:
+ %and = and i32 %conv, 4
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %then, label %else
+
+then:
+ ret i1 0
+
+else:
+ %and1 = and i32 %conv, 7
+ %cmp1 = icmp eq i32 %and1, 0
+ ret i1 %cmp1
+}
From b80b7de04dcad4be5eff5545ee0a67984c55f17e Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Thu, 21 Mar 2024 21:21:13 +0800
Subject: [PATCH 2/4] [ValueTracking] Convert `isKnownNonZero` to use
SimplifyQuery
---
llvm/include/llvm/Analysis/ValueTracking.h | 6 +---
llvm/lib/Analysis/BasicAliasAnalysis.cpp | 3 +-
llvm/lib/Analysis/InstructionSimplify.cpp | 29 ++++++++-----------
llvm/lib/Analysis/LazyValueInfo.cpp | 5 ++--
llvm/lib/Analysis/Loads.cpp | 6 ++--
llvm/lib/Analysis/ScalarEvolution.cpp | 2 +-
llvm/lib/Analysis/ValueTracking.cpp | 17 +++--------
llvm/lib/CodeGen/CodeGenPrepare.cpp | 2 +-
.../Transforms/IPO/AttributorAttributes.cpp | 5 ++--
llvm/lib/Transforms/IPO/FunctionAttrs.cpp | 2 +-
.../InstCombine/InstCombineAddSub.cpp | 2 +-
.../InstCombine/InstCombineAndOrXor.cpp | 8 ++---
.../InstCombine/InstCombineCalls.cpp | 13 +++++----
.../InstCombine/InstCombineCompares.cpp | 21 ++++++--------
.../Transforms/InstCombine/InstCombinePHI.cpp | 3 +-
.../InstCombine/InstructionCombining.cpp | 2 +-
.../Instrumentation/MemorySanitizer.cpp | 4 +--
.../Utils/PromoteMemoryToRegister.cpp | 2 +-
.../lib/Transforms/Utils/SimplifyLibCalls.cpp | 20 ++++++-------
.../Transforms/Vectorize/VectorCombine.cpp | 3 +-
llvm/test/Transforms/InstCombine/icmp-dom.ll | 14 ++-------
llvm/unittests/Analysis/ValueTrackingTest.cpp | 15 ++++++----
22 files changed, 83 insertions(+), 101 deletions(-)
diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 3970efba18cc8c8..9db0894162afca1 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -124,11 +124,7 @@ bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
-bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr,
- bool UseInstrInfo = true);
+bool isKnownNonZero(const Value *V, unsigned Depth, const SimplifyQuery &Q);
/// Return true if the two given values are negations of each other.
/// Currently can recognize the Value pair:
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 8dfc4b2a1cb1f49..b082dfe8fbd217f 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -1283,7 +1283,8 @@ AliasResult BasicAAResult::aliasGEP(
// VarIndex = Scale*V.
const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
if (Var.Val.TruncBits == 0 &&
- isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
+ isKnownNonZero(Var.Val.V, /*Depth=*/0,
+ SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
// Check if abs(V*Scale) >= abs(Scale) holds in the presence of
// potentially wrapping math.
auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 3c943a09a9c2324..733e338ed479f39 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -1586,12 +1586,10 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
if (match(UnsignedICmp,
m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
- EqPred == ICmpInst::ICMP_NE &&
- isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, /*Depth=*/0, Q))
return UnsignedICmp;
if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
- EqPred == ICmpInst::ICMP_EQ &&
- isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, /*Depth=*/0, Q))
return UnsignedICmp;
}
}
@@ -1609,13 +1607,13 @@ static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
// X > Y && Y == 0 --> Y == 0 iff X != 0
// X > Y || Y == 0 --> X > Y iff X != 0
if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
- isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ isKnownNonZero(X, /*Depth=*/0, Q))
return IsAnd ? ZeroICmp : UnsignedICmp;
// X <= Y && Y != 0 --> X <= Y iff X != 0
// X <= Y || Y != 0 --> Y != 0 iff X != 0
if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
- isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ isKnownNonZero(X, /*Depth=*/0, Q))
return IsAnd ? UnsignedICmp : ZeroICmp;
// The transforms below here are expected to be handled more generally with
@@ -2821,11 +2819,10 @@ static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
// the other operand can not be based on the alloc - if it were, then
// the cmp itself would be a capture.
Value *MI = nullptr;
- if (isAllocLikeFn(LHS, TLI) &&
- llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
+ if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonZero(RHS, /*Depth=*/0, Q))
MI = LHS;
else if (isAllocLikeFn(RHS, TLI) &&
- llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
+ llvm::isKnownNonZero(LHS, /*Depth=*/0, Q))
MI = RHS;
if (MI) {
// FIXME: This is incorrect, see PR54002. While we can assume that the
@@ -2981,12 +2978,12 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
- if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
+ if (isKnownNonZero(LHS, /*Depth=*/0, Q))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
- if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
+ if (isKnownNonZero(LHS, /*Depth=*/0, Q))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT: {
@@ -3001,8 +2998,7 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
if (LHSKnown.isNegative())
return getTrue(ITy);
- if (LHSKnown.isNonNegative() &&
- isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, /*Depth=*/0, Q))
return getFalse(ITy);
break;
}
@@ -3018,8 +3014,7 @@ static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
if (LHSKnown.isNegative())
return getFalse(ITy);
- if (LHSKnown.isNonNegative() &&
- isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, /*Depth=*/0, Q))
return getTrue(ITy);
break;
}
@@ -3172,7 +3167,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
const APInt *C;
if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
(match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
- if (isKnownNonZero(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) {
+ if (isKnownNonZero(RHS, /*Depth=*/0, Q)) {
switch (Pred) {
default:
break;
@@ -3405,7 +3400,7 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
- !isKnownNonZero(LBO->getOperand(0), Q.DL))
+ !isKnownNonZero(LBO->getOperand(0), /*Depth=*/0, Q))
break;
if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
RBO->getOperand(1), Q, MaxRecurse - 1))
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 6cded828c25f4a6..3223b0564e6c9db 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -645,7 +645,7 @@ LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
// instruction is placed, even if it could legally be hoisted much higher.
// That is unfortunate.
PointerType *PT = dyn_cast<PointerType>(BBI->getType());
- if (PT && isKnownNonZero(BBI, DL))
+ if (PT && isKnownNonZero(BBI, /*Depth=*/0, DL))
return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
if (BBI->getType()->isIntegerTy()) {
@@ -1863,7 +1863,8 @@ LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
Module *M = CxtI->getModule();
const DataLayout &DL = M->getDataLayout();
if (V->getType()->isPointerTy() && C->isNullValue() &&
- isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
+ isKnownNonZero(V->stripPointerCastsSameRepresentation(), /*Depth=*/0,
+ DL)) {
if (Pred == ICmpInst::ICMP_EQ)
return LazyValueInfo::False;
else if (Pred == ICmpInst::ICMP_NE)
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 5916d2ab48ececf..b5403408cf2ab38 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -99,7 +99,8 @@ static bool isDereferenceableAndAlignedPointer(
CheckForFreed));
if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
!CheckForFreed)
- if (!CheckForNonNull || isKnownNonZero(V, DL, 0, AC, CtxI, DT)) {
+ if (!CheckForNonNull ||
+ isKnownNonZero(V, /*Depth=*/0, SimplifyQuery(DL, DT, AC, CtxI))) {
// As we recursed through GEPs to get here, we've incrementally checked
// that each step advanced by a multiple of the alignment. If our base is
// properly aligned, then the original offset accessed must also be.
@@ -133,7 +134,8 @@ static bool isDereferenceableAndAlignedPointer(
if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
- isKnownNonZero(V, DL, 0, AC, CtxI, DT) && !V->canBeFreed()) {
+ isKnownNonZero(V, /*Depth=*/0, SimplifyQuery(DL, DT, AC, CtxI)) &&
+ !V->canBeFreed()) {
// As we recursed through GEPs to get here, we've incrementally
// checked that each step advanced by a multiple of the alignment. If
// our base is properly aligned, then the original offset accessed
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 9fcce797f559763..733a2fb1719aa86 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6893,7 +6893,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
uint64_t Rem = MaxVal.urem(Align);
MaxVal -= APInt(BitWidth, Rem);
APInt MinVal = APInt::getZero(BitWidth);
- if (llvm::isKnownNonZero(V, DL))
+ if (llvm::isKnownNonZero(V, /*Depth=*/0, DL))
MinVal = Align;
ConservativeResult = ConservativeResult.intersectWith(
ConstantRange::getNonEmpty(MinVal, MaxVal + 1), RangeType);
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index eb8925c53475372..7d3893061184afb 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -274,16 +274,6 @@ bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
unsigned Depth, const SimplifyQuery &Q);
-static bool isKnownNonZero(const Value *V, unsigned Depth,
- const SimplifyQuery &Q);
-
-bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
- AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT, bool UseInstrInfo) {
- return ::isKnownNonZero(
- V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
-}
-
bool llvm::isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
unsigned Depth) {
return computeKnownBits(V, Depth, SQ).isNonNegative();
@@ -298,7 +288,7 @@ bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ,
// this updated.
KnownBits Known = computeKnownBits(V, Depth, SQ);
return Known.isNonNegative() &&
- (Known.isNonZero() || ::isKnownNonZero(V, Depth, SQ));
+ (Known.isNonZero() || isKnownNonZero(V, Depth, SQ));
}
bool llvm::isKnownNegative(const Value *V, const SimplifyQuery &SQ,
@@ -3093,11 +3083,12 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
return false;
}
-bool isKnownNonZero(const Value *V, unsigned Depth, const SimplifyQuery &Q) {
+bool llvm::isKnownNonZero(const Value *V, unsigned Depth,
+ const SimplifyQuery &Q) {
auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
APInt DemandedElts =
FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
- return isKnownNonZero(V, DemandedElts, Depth, Q);
+ return ::isKnownNonZero(V, DemandedElts, Depth, Q);
}
/// If the pair of operators are the same invertible function, return the
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index e657872c3828482..4225b2c3c48dac5 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2314,7 +2314,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,
// Bail if the value is never zero.
Use &Op = CountZeros->getOperandUse(0);
- if (isKnownNonZero(Op, *DL))
+ if (isKnownNonZero(Op, /*Depth=*/0, SimplifyQuery(*DL)))
return false;
// The intrinsic will be sunk behind a compare against zero and branch.
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index ff680e998e71dbb..f27d8d64a104045 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -2452,8 +2452,9 @@ bool AANonNull::isImpliedByIR(Attributor &A, const IRPosition &IRP,
}
if (llvm::any_of(Worklist, [&](AA::ValueAndContext VAC) {
- return !isKnownNonZero(VAC.getValue(), A.getDataLayout(), 0, AC,
- VAC.getCtxI(), DT);
+ return !isKnownNonZero(
+ VAC.getValue(), /*Depth=*/0,
+ SimplifyQuery(A.getDataLayout(), DT, AC, VAC.getCtxI()));
}))
return false;
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 7ebf265e17ba1f7..14612b251d1a42d 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -1175,7 +1175,7 @@ static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
Value *RetVal = FlowsToReturn[i];
// If this value is locally known to be non-null, we're good
- if (isKnownNonZero(RetVal, DL))
+ if (isKnownNonZero(RetVal, /*Depth=*/0, DL))
continue;
// Otherwise, we need to look upwards since we can't make any local
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index fa1e2280991fdd2..07c50d866544b38 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -988,7 +988,7 @@ Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
if (C->isOne()) {
if (match(Op0, m_ZExt(m_Add(m_Value(X), m_AllOnes())))) {
const SimplifyQuery Q = SQ.getWithInstruction(&Add);
- if (llvm::isKnownNonZero(X, DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (llvm::isKnownNonZero(X, /*Depth=*/0, Q))
return new ZExtInst(X, Ty);
}
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index c0cf1a7db726003..2c0c4ee46e80980 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1031,10 +1031,6 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
!ICmpInst::isEquality(EqPred))
return nullptr;
- auto IsKnownNonZero = [&](Value *V) {
- return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
- };
-
ICmpInst::Predicate UnsignedPred;
Value *A, *B;
@@ -1043,9 +1039,9 @@ static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
(ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
- if (!IsKnownNonZero(NonZero))
+ if (!isKnownNonZero(NonZero, /*Depth=*/0, Q))
std::swap(NonZero, Other);
- return IsKnownNonZero(NonZero);
+ return isKnownNonZero(NonZero, /*Depth=*/0, Q);
};
// Given ZeroCmpOp = (A + B)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index b1017f4d6bc179e..b35ab8751a21182 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -601,8 +601,8 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
// then change the 'ZeroIsPoison' parameter to 'true'
// because we know the zero behavior can't affect the result.
if (!Known.One.isZero() ||
- isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
- &IC.getDominatorTree())) {
+ isKnownNonZero(Op0, /*Depth=*/0,
+ IC.getSimplifyQuery().getWithInstruction(&II))) {
if (!match(II.getArgOperand(1), m_One()))
return IC.replaceOperand(II, 1, IC.Builder.getTrue());
}
@@ -2061,7 +2061,8 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
// See if we can deduce non-null.
if (!CI.hasRetAttr(Attribute::NonNull) &&
(Known.isNonZero() ||
- isKnownNonZero(II, DL, /*Depth*/ 0, &AC, II, &DT))) {
+ isKnownNonZero(II, /*Depth=*/0,
+ getSimplifyQuery().getWithInstruction(II)))) {
CI.addRetAttr(Attribute::NonNull);
Changed = true;
}
@@ -3648,7 +3649,8 @@ Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
for (Value *V : Call.args()) {
if (V->getType()->isPointerTy() &&
!Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
- isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
+ isKnownNonZero(V, /*Depth=*/0,
+ getSimplifyQuery().getWithInstruction(&Call)))
ArgNos.push_back(ArgNo);
ArgNo++;
}
@@ -3828,7 +3830,8 @@ Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
// isKnownNonNull -> nonnull attribute
if (!GCR.hasRetAttr(Attribute::NonNull) &&
- isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) {
+ isKnownNonZero(DerivedPtr, /*Depth=*/0,
+ getSimplifyQuery().getWithInstruction(&Call))) {
GCR.addRetAttr(Attribute::NonNull);
// We discovered a new fact, re-check users.
Worklist.pushUsersToWorkList(GCR);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 7292bb62702aaa5..90550cdbdf89113 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1273,12 +1273,12 @@ Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
// if X non-zero and NoOverflow(X * Y)
// (icmp eq/ne Y)
- if (!XKnown.One.isZero() || isKnownNonZero(X, DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (!XKnown.One.isZero() || isKnownNonZero(X, /*Depth=*/0, Q))
return new ICmpInst(Pred, Y, Cmp.getOperand(1));
// if Y non-zero and NoOverflow(X * Y)
// (icmp eq/ne X)
- if (!YKnown.One.isZero() || isKnownNonZero(Y, DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (!YKnown.One.isZero() || isKnownNonZero(Y, /*Depth=*/0, Q))
return new ICmpInst(Pred, X, Cmp.getOperand(1));
}
// Note, we are skipping cases:
@@ -3087,7 +3087,7 @@ Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
// (X + -1) <u C --> X <=u C (if X is never null)
if (Pred == CmpInst::ICMP_ULT && C2->isAllOnes()) {
const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
- if (llvm::isKnownNonZero(X, DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (llvm::isKnownNonZero(X, /*Depth=*/0, Q))
return new ICmpInst(ICmpInst::ICMP_ULE, X, ConstantInt::get(Ty, C));
}
@@ -4275,8 +4275,7 @@ static Value *foldICmpWithLowBitMaskedVal(ICmpInst::Predicate Pred, Value *Op0,
// Look for: x & ~Mask pred ~Mask
if (isMaskOrZero(X, /*Not=*/true, Q)) {
- return !ICmpInst::isSigned(Pred) ||
- isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+ return !ICmpInst::isSigned(Pred) || isKnownNonZero(X, /*Depth=*/0, Q);
}
return false;
}
@@ -4780,8 +4779,7 @@ static Instruction *foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q,
// icmp (X ^ Y_NonZero) s>= X --> icmp (X ^ Y_NonZero) s> X
// icmp (X ^ Y_NonZero) s<= X --> icmp (X ^ Y_NonZero) s< X
CmpInst::Predicate PredOut = CmpInst::getStrictPredicate(Pred);
- if (PredOut != Pred &&
- isKnownNonZero(A, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ if (PredOut != Pred && isKnownNonZero(A, /*Depth=*/0, Q))
return new ICmpInst(PredOut, Op0, Op1);
return nullptr;
@@ -5064,11 +5062,11 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
return new ICmpInst(Pred, C, D);
// (A - B) u>=/u< A --> B u>/u<= A iff B != 0
if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
- isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ isKnownNonZero(B, /*Depth=*/0, Q))
return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
// C u<=/u> (C - D) --> C u</u>= D iff D != 0
if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
- isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ isKnownNonZero(D, /*Depth=*/0, Q))
return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
// icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
@@ -5110,14 +5108,13 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
// X * Z eq/ne Y * Z -> X eq/ne Y
if (ZKnown.countMaxTrailingZeros() == 0)
return new ICmpInst(Pred, X, Y);
- NonZero = !ZKnown.One.isZero() ||
- isKnownNonZero(Z, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+ NonZero = !ZKnown.One.isZero() || isKnownNonZero(Z, /*Depth=*/0, Q);
// if Z != 0 and nsw(X * Z) and nsw(Y * Z)
// X * Z eq/ne Y * Z -> X eq/ne Y
if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
return new ICmpInst(Pred, X, Y);
} else
- NonZero = isKnownNonZero(Z, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+ NonZero = isKnownNonZero(Z, /*Depth=*/0, Q);
// If Z != 0 and nuw(X * Z) and nuw(Y * Z)
// X * Z u{lt/le/gt/ge}/eq/ne Y * Z -> X u{lt/le/gt/ge}/eq/ne Y
diff --git a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 46bca4b722a03ae..9838e2aa9f3a24f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -1537,7 +1537,8 @@ Instruction *InstCombinerImpl::visitPHINode(PHINode &PN) {
for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
Instruction *CtxI = PN.getIncomingBlock(I)->getTerminator();
Value *VA = PN.getIncomingValue(I);
- if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
+ if (isKnownNonZero(VA, 0,
+ getSimplifyQuery().getWithInstruction(CtxI))) {
if (!NonZeroConst)
NonZeroConst = getAnyNonZeroConstInt(PN);
if (NonZeroConst != VA) {
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 7c40fb4fc86082d..4cfafa7ac80060d 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1431,7 +1431,7 @@ Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
if (OpsKnown[OpNo].hasKnownBits() &&
OpsKnown[OpNo].getKnownBits(SQ).isNonZero())
return true;
- return isKnownNonZero(IntOps[OpNo], SQ.DL);
+ return isKnownNonZero(IntOps[OpNo], /*Depth=*/0, SQ);
};
auto IsNonNeg = [&](unsigned OpNo) -> bool {
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index ee3531bbd68df30..a72b0ee9a08e014 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1281,7 +1281,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// ignored.
return;
}
- if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
+ if (llvm::isKnownNonZero(ConvertedShadow, /*Depth=*/0, DL)) {
// Copy origin as the value is definitely uninitialized.
paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
OriginAlignment);
@@ -1427,7 +1427,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Skip, value is initialized or const shadow is ignored.
continue;
}
- if (llvm::isKnownNonZero(ConvertedShadow, DL)) {
+ if (llvm::isKnownNonZero(ConvertedShadow, /*Depth=*/0, DL)) {
// Report as the value is definitely uninitialized.
insertWarningFn(IRB, ShadowData.Origin);
if (!MS.Recover)
diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index adcf161b313b2b1..f376b5f7d68d4a9 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -459,7 +459,7 @@ static void convertMetadataToAssumes(LoadInst *LI, Value *Val,
// we can only do this if the value is known non-poison.
if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
LI->getMetadata(LLVMContext::MD_noundef) &&
- !isKnownNonZero(Val, DL, 0, AC, LI, DT))
+ !isKnownNonZero(Val, /*Depth=*/0, SimplifyQuery(DL, DT, AC, LI)))
addAssumeNonNull(AC, LI);
}
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 2e68a9c01898c86..7e9e91606fe22de 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -305,7 +305,7 @@ static void annotateNonNullAndDereferenceable(CallInst *CI, ArrayRef<unsigned> A
if (ConstantInt *LenC = dyn_cast<ConstantInt>(Size)) {
annotateNonNullNoUndefBasedOnAccess(CI, ArgNos);
annotateDereferenceableBytes(CI, ArgNos, LenC->getZExtValue());
- } else if (isKnownNonZero(Size, DL)) {
+ } else if (isKnownNonZero(Size, /*Depth=*/0, DL)) {
annotateNonNullNoUndefBasedOnAccess(CI, ArgNos);
const APInt *X, *Y;
uint64_t DerefMin = 1;
@@ -394,7 +394,7 @@ Value *LibCallSimplifier::optimizeStrNCat(CallInst *CI, IRBuilderBase &B) {
Value *Size = CI->getArgOperand(2);
uint64_t Len;
annotateNonNullNoUndefBasedOnAccess(CI, 0);
- if (isKnownNonZero(Size, DL))
+ if (isKnownNonZero(Size, /*Depth=*/0, DL))
annotateNonNullNoUndefBasedOnAccess(CI, 1);
// We don't do anything if length is not constant.
@@ -613,7 +613,7 @@ Value *LibCallSimplifier::optimizeStrNCmp(CallInst *CI, IRBuilderBase &B) {
if (Str1P == Str2P) // strncmp(x,x,n) -> 0
return ConstantInt::get(CI->getType(), 0);
- if (isKnownNonZero(Size, DL))
+ if (isKnownNonZero(Size, /*Depth=*/0, DL))
annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
// Get the length argument if it is constant.
uint64_t Length;
@@ -749,7 +749,7 @@ Value *LibCallSimplifier::optimizeStpCpy(CallInst *CI, IRBuilderBase &B) {
Value *LibCallSimplifier::optimizeStrLCpy(CallInst *CI, IRBuilderBase &B) {
Value *Size = CI->getArgOperand(2);
- if (isKnownNonZero(Size, DL))
+ if (isKnownNonZero(Size, /*Depth=*/0, DL))
// Like snprintf, the function stores into the destination only when
// the size argument is nonzero.
annotateNonNullNoUndefBasedOnAccess(CI, 0);
@@ -833,7 +833,7 @@ Value *LibCallSimplifier::optimizeStringNCpy(CallInst *CI, bool RetEnd,
Value *Src = CI->getArgOperand(1);
Value *Size = CI->getArgOperand(2);
- if (isKnownNonZero(Size, DL)) {
+ if (isKnownNonZero(Size, /*Depth=*/0, DL)) {
// Both st{p,r}ncpy(D, S, N) access the source and destination arrays
// only when N is nonzero.
annotateNonNullNoUndefBasedOnAccess(CI, 0);
@@ -926,7 +926,7 @@ Value *LibCallSimplifier::optimizeStringLength(CallInst *CI, IRBuilderBase &B,
Type *CharTy = B.getIntNTy(CharSize);
if (isOnlyUsedInZeroEqualityComparison(CI) &&
- (!Bound || isKnownNonZero(Bound, DL))) {
+ (!Bound || isKnownNonZero(Bound, /*Depth=*/0, DL))) {
// Fold strlen:
// strlen(x) != 0 --> *x != 0
// strlen(x) == 0 --> *x == 0
@@ -1047,7 +1047,7 @@ Value *LibCallSimplifier::optimizeStrNLen(CallInst *CI, IRBuilderBase &B) {
if (Value *V = optimizeStringLength(CI, B, 8, Bound))
return V;
- if (isKnownNonZero(Bound, DL))
+ if (isKnownNonZero(Bound, /*Depth=*/0, DL))
annotateNonNullNoUndefBasedOnAccess(CI, 0);
return nullptr;
}
@@ -1291,7 +1291,7 @@ Value *LibCallSimplifier::optimizeMemChr(CallInst *CI, IRBuilderBase &B) {
Value *SrcStr = CI->getArgOperand(0);
Value *Size = CI->getArgOperand(2);
- if (isKnownNonZero(Size, DL)) {
+ if (isKnownNonZero(Size, /*Depth=*/0, DL)) {
annotateNonNullNoUndefBasedOnAccess(CI, 0);
if (isOnlyUsedInEqualityComparison(CI, SrcStr))
return memChrToCharCompare(CI, Size, B, DL);
@@ -2976,7 +2976,7 @@ Value *LibCallSimplifier::optimizeStrToInt(CallInst *CI, IRBuilderBase &B,
// It would be readonly too, except that it still may write to errno.
CI->addParamAttr(0, Attribute::NoCapture);
EndPtr = nullptr;
- } else if (!isKnownNonZero(EndPtr, DL))
+ } else if (!isKnownNonZero(EndPtr, /*Depth=*/0, DL))
return nullptr;
StringRef Str;
@@ -3402,7 +3402,7 @@ Value *LibCallSimplifier::optimizeSnPrintF(CallInst *CI, IRBuilderBase &B) {
return V;
}
- if (isKnownNonZero(CI->getOperand(1), DL))
+ if (isKnownNonZero(CI->getOperand(1), /*Depth=*/0, DL))
annotateNonNullNoUndefBasedOnAccess(CI, 0);
return nullptr;
}
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 44cba60013afa3e..ff79af9ce52a714 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -885,7 +885,8 @@ bool VectorCombine::scalarizeVPIntrinsic(Instruction &I) {
else
SafeToSpeculate = isSafeToSpeculativelyExecuteWithOpcode(
*FunctionalOpcode, &VPI, nullptr, &AC, &DT);
- if (!SafeToSpeculate && !isKnownNonZero(EVL, *DL, 0, &AC, &VPI, &DT))
+ if (!SafeToSpeculate &&
+ !isKnownNonZero(EVL, /*Depth=*/0, SimplifyQuery(*DL, &DT, &AC, &VPI)))
return false;
Value *ScalarVal =
diff --git a/llvm/test/Transforms/InstCombine/icmp-dom.ll b/llvm/test/Transforms/InstCombine/icmp-dom.ll
index 138254d912b259b..83cedd5ea9cb450 100644
--- a/llvm/test/Transforms/InstCombine/icmp-dom.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-dom.ll
@@ -413,9 +413,7 @@ define i1 @and_mask1_eq(i32 %conv) {
; CHECK: then:
; CHECK-NEXT: ret i1 false
; CHECK: else:
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[CONV]], 3
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[AND1]], 0
-; CHECK-NEXT: ret i1 [[CMP1]]
+; CHECK-NEXT: ret i1 false
;
entry:
%and = and i32 %conv, 1
@@ -440,9 +438,7 @@ define i1 @and_mask1_ne(i32 %conv) {
; CHECK: then:
; CHECK-NEXT: ret i1 false
; CHECK: else:
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[CONV]], 3
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[AND1]], 0
-; CHECK-NEXT: ret i1 [[CMP1]]
+; CHECK-NEXT: ret i1 true
;
entry:
%and = and i32 %conv, 1
@@ -514,8 +510,6 @@ else:
ret i1 %cmp1
}
-; TODO: %cmp1 can be folded into false.
-
define i1 @and_mask4(i32 %conv) {
; CHECK-LABEL: @and_mask4(
; CHECK-NEXT: entry:
@@ -525,9 +519,7 @@ define i1 @and_mask4(i32 %conv) {
; CHECK: then:
; CHECK-NEXT: ret i1 false
; CHECK: else:
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[CONV]], 7
-; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[AND1]], 0
-; CHECK-NEXT: ret i1 [[CMP1]]
+; CHECK-NEXT: ret i1 false
;
entry:
%and = and i32 %conv, 4
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index b4d2270d70703f2..6b1044876d366f3 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2055,7 +2055,8 @@ TEST_F(ValueTrackingTest, isNonZeroRecurrence) {
)");
const DataLayout &DL = M->getDataLayout();
AssumptionCache AC(*F);
- EXPECT_TRUE(isKnownNonZero(A, DL, 0, &AC, CxtI));
+ EXPECT_TRUE(isKnownNonZero(A, /*Depth=*/0,
+ SimplifyQuery(DL, /*DT=*/nullptr, &AC, CxtI)));
}
TEST_F(ValueTrackingTest, KnownNonZeroFromDomCond) {
@@ -2078,8 +2079,10 @@ TEST_F(ValueTrackingTest, KnownNonZeroFromDomCond) {
AssumptionCache AC(*F);
DominatorTree DT(*F);
const DataLayout &DL = M->getDataLayout();
- EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI, &DT), true);
- EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI2, &DT), false);
+ const SimplifyQuery SQ(DL, &DT, &AC);
+ EXPECT_EQ(isKnownNonZero(A, /*Depth=*/0, SQ.getWithInstruction(CxtI)), true);
+ EXPECT_EQ(isKnownNonZero(A, /*Depth=*/0, SQ.getWithInstruction(CxtI2)),
+ false);
}
TEST_F(ValueTrackingTest, KnownNonZeroFromDomCond2) {
@@ -2102,8 +2105,10 @@ TEST_F(ValueTrackingTest, KnownNonZeroFromDomCond2) {
AssumptionCache AC(*F);
DominatorTree DT(*F);
const DataLayout &DL = M->getDataLayout();
- EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI, &DT), true);
- EXPECT_EQ(isKnownNonZero(A, DL, 0, &AC, CxtI2, &DT), false);
+ const SimplifyQuery SQ(DL, &DT, &AC);
+ EXPECT_EQ(isKnownNonZero(A, /*Depth=*/0, SQ.getWithInstruction(CxtI)), true);
+ EXPECT_EQ(isKnownNonZero(A, /*Depth=*/0, SQ.getWithInstruction(CxtI2)),
+ false);
}
TEST_F(ValueTrackingTest, IsImpliedConditionAnd) {
From 4af6856e9985c33e41e62625571a35616006f4a0 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Thu, 11 Apr 2024 23:34:30 +0800
Subject: [PATCH 3/4] [ValueTracking] Address review comments
---
llvm/lib/CodeGen/CodeGenPrepare.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 4225b2c3c48dac5..22dbb3198a9f17b 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2314,7 +2314,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,
// Bail if the value is never zero.
Use &Op = CountZeros->getOperandUse(0);
- if (isKnownNonZero(Op, /*Depth=*/0, SimplifyQuery(*DL)))
+ if (isKnownNonZero(Op, /*Depth=*/0, *DL))
return false;
// The intrinsic will be sunk behind a compare against zero and branch.
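A note on why patch 3 compiles: the third parameter of `isKnownNonZero` is a `const SimplifyQuery &`, so passing `*DL` directly relies on SimplifyQuery being implicitly constructible from a DataLayout. A sketch of the constructor this assumes (paraphrasing InstructionSimplify.h, not part of this diff):

    // SimplifyQuery(const DataLayout &DL, const Instruction *CxtI = nullptr);
    isKnownNonZero(Op, /*Depth=*/0, *DL); // *DL builds a temporary SimplifyQuery

The same implicit conversion is what the LazyValueInfo, FunctionAttrs, MemorySanitizer, and SimplifyLibCalls call sites in patch 2 depend on when they pass a bare `DL`.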
From e780f799300845b9604b58586b8e1d36df3d7880 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Fri, 12 Apr 2024 15:57:32 +0800
Subject: [PATCH 4/4] [ValueTracking] Fix clang build failure
---
clang/lib/CodeGen/CGCall.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 3f5463a9a70e9d8..7a0bc6fa77b8893 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -4124,7 +4124,8 @@ static bool isProvablyNull(llvm::Value *addr) {
}
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) {
- return llvm::isKnownNonZero(Addr.getBasePointer(), CGF.CGM.getDataLayout());
+ return llvm::isKnownNonZero(Addr.getBasePointer(), /*Depth=*/0,
+ CGF.CGM.getDataLayout());
}
/// Emit the actual writing-back of a writeback.