[llvm] 503e4b2 - [InstCombine] Perform some cleanups, add some tests
David Majnemer via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 31 12:32:44 PST 2025
Author: David Majnemer
Date: 2025-01-31T20:30:14Z
New Revision: 503e4b2d54ad81507d6072be522ce57b5393168c
URL: https://github.com/llvm/llvm-project/commit/503e4b2d54ad81507d6072be522ce57b5393168c
DIFF: https://github.com/llvm/llvm-project/commit/503e4b2d54ad81507d6072be522ce57b5393168c.diff
LOG: [InstCombine] Perform some cleanups, add some tests
No functional change is intended.
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/test/Transforms/InstCombine/and-compare.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 5a4791870ac77bf..52b0a164de0aedc 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -37,11 +37,10 @@ using namespace PatternMatch;
// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");
-
/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
-static bool addWithOverflow(APInt &Result, const APInt &In1,
- const APInt &In2, bool IsSigned = false) {
+static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
+ bool IsSigned = false) {
bool Overflow;
if (IsSigned)
Result = In1.sadd_ov(In2, Overflow);
@@ -53,8 +52,8 @@ static bool addWithOverflow(APInt &Result, const APInt &In1,
/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
-static bool subWithOverflow(APInt &Result, const APInt &In1,
- const APInt &In2, bool IsSigned = false) {
+static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
+ bool IsSigned = false) {
bool Overflow;
if (IsSigned)
Result = In1.ssub_ov(In2, Overflow);
@@ -810,15 +809,15 @@ Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType()) {
// If the GEPs only differ by one index, compare it.
- unsigned NumDifferences = 0; // Keep track of # differences.
- unsigned DiffOperand = 0; // The operand that differs.
+ unsigned NumDifferences = 0; // Keep track of # differences.
+ unsigned DiffOperand = 0;    // The operand that differs.
for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
Type *LHSType = GEPLHS->getOperand(i)->getType();
Type *RHSType = GEPRHS->getOperand(i)->getType();
// FIXME: Better support for vector of pointers.
if (LHSType->getPrimitiveSizeInBits() !=
- RHSType->getPrimitiveSizeInBits() ||
+ RHSType->getPrimitiveSizeInBits() ||
(GEPLHS->getType()->isVectorTy() &&
(!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
// Irreconcilable differences.
@@ -826,13 +825,15 @@ Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
break;
}
- if (NumDifferences++) break;
+ if (NumDifferences++)
+ break;
DiffOperand = i;
}
- if (NumDifferences == 0) // SAME GEP?
- return replaceInstUsesWith(I, // No comparison is needed here.
- ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
+ if (NumDifferences == 0) // SAME GEP?
+ return replaceInstUsesWith(
+ I, // No comparison is needed here.
+ ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
else if (NumDifferences == 1 && CanFold(NW)) {
Value *LHSV = GEPLHS->getOperand(DiffOperand);
@@ -910,8 +911,8 @@ bool InstCombinerImpl::foldAllocaCmp(AllocaInst *Alloca) {
case 2: {
// The alloca is only used in one icmp operand. Assume that the
// equality is false.
- auto *Res = ConstantInt::get(
- ICmp->getType(), ICmp->getPredicate() == ICmpInst::ICMP_NE);
+ auto *Res = ConstantInt::get(ICmp->getType(),
+ ICmp->getPredicate() == ICmpInst::ICMP_NE);
replaceInstUsesWith(*ICmp, Res);
eraseInstFromFunction(*ICmp);
Changed = true;
@@ -942,8 +943,8 @@ Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C,
// (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
// (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
- Constant *R = ConstantInt::get(X->getType(),
- APInt::getMaxValue(C.getBitWidth()) - C);
+ Constant *R =
+ ConstantInt::get(X->getType(), APInt::getMaxValue(C.getBitWidth()) - C);
return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
}
@@ -1738,8 +1739,8 @@ Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp,
} else {
Value *NewAnd = Builder.CreateAnd(
Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
- return new ICmpInst(Cmp.getPredicate(),
- NewAnd, ConstantInt::get(And->getType(), NewCmpCst));
+ return new ICmpInst(Cmp.getPredicate(), NewAnd,
+ ConstantInt::get(And->getType(), NewCmpCst));
}
}
@@ -2012,13 +2013,12 @@ Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp,
Value *A;
const APInt *Addend, *Msk;
if (match(And, m_And(m_OneUse(m_Add(m_Value(A), m_APInt(Addend))),
- m_APInt(Msk))) &&
- Msk->isMask() && C.ule(*Msk)) {
+ m_LowBitMask(Msk))) &&
+ C.ule(*Msk)) {
APInt NewComperand = (C - *Addend) & *Msk;
- Value* MaskA = Builder.CreateAnd(A, ConstantInt::get(A->getType(), *Msk));
- return new ICmpInst(
- Pred, MaskA,
- Constant::getIntegerValue(MaskA->getType(), NewComperand));
+ Value *MaskA = Builder.CreateAnd(A, ConstantInt::get(A->getType(), *Msk));
+ return new ICmpInst(Pred, MaskA,
+ ConstantInt::get(MaskA->getType(), NewComperand));
}
}
@@ -2319,7 +2319,7 @@ static Instruction *foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl,
// (1 << Y) < 1 -> Y == 31
// (1 << Y) < C -> Y == 31 if C is negative and not signed min.
// Exclude signed min by subtracting 1 and lower the upper bound to 0.
- if (Pred == ICmpInst::ICMP_SLT && (C-1).sle(0))
+ if (Pred == ICmpInst::ICMP_SLT && (C - 1).sle(0))
return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
}
@@ -2804,7 +2804,7 @@ Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp,
// (X s/ Y) == SMIN --> (X == SMIN) && (Y == 1)
// (X s/ Y) != SMIN --> (X != SMIN) || (Y != 1)
if (Cmp.isEquality() && Div->hasOneUse() && C.isSignBitSet() &&
- (!DivIsSigned || C.isMinSignedValue())) {
+ (!DivIsSigned || C.isMinSignedValue())) {
Value *XBig = Builder.CreateICmp(Pred, X, ConstantInt::get(Ty, C));
Value *YOne = Builder.CreateICmp(Pred, Y, ConstantInt::get(Ty, 1));
auto Logic = Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
@@ -3357,14 +3357,14 @@ Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp,
// the entire original Cmp can be simplified to a false.
Value *Cond = Builder.getFalse();
if (TrueWhenLessThan)
- Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT,
- OrigLHS, OrigRHS));
+ Cond = Builder.CreateOr(
+ Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS));
if (TrueWhenEqual)
- Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ,
- OrigLHS, OrigRHS));
+ Cond = Builder.CreateOr(
+ Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS));
if (TrueWhenGreaterThan)
- Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT,
- OrigLHS, OrigRHS));
+ Cond = Builder.CreateOr(
+ Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS));
return replaceInstUsesWith(Cmp, Cond);
}
@@ -3806,8 +3806,8 @@ Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant(
APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
: APInt::getHighBitsSet(BitWidth, Num + 1);
APInt Mask2 = IsTrailing
- ? APInt::getOneBitSet(BitWidth, Num)
- : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
+ ? APInt::getOneBitSet(BitWidth, Num)
+ : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
return new ICmpInst(Pred, Builder.CreateAnd(II->getArgOperand(0), Mask1),
ConstantInt::get(Ty, Mask2));
}
@@ -4867,18 +4867,17 @@ Value *InstCombinerImpl::foldMultiplicationOverflowCheck(ICmpInst &I) {
return nullptr; // Wrong predicate.
}
} else // Look for: ((x * y) / x) !=/== y
- if (I.isEquality() &&
- match(&I,
- m_c_ICmp(Pred, m_Value(Y),
- m_CombineAnd(
- m_OneUse(m_IDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
- m_Value(X)),
- m_Instruction(Mul)),
- m_Deferred(X))),
- m_Instruction(Div))))) {
- NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
- } else
- return nullptr;
+ if (I.isEquality() &&
+ match(&I, m_c_ICmp(Pred, m_Value(Y),
+ m_CombineAnd(m_OneUse(m_IDiv(
+ m_CombineAnd(m_c_Mul(m_Deferred(Y),
+ m_Value(X)),
+ m_Instruction(Mul)),
+ m_Deferred(X))),
+ m_Instruction(Div))))) {
+ NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
+ } else
+ return nullptr;
BuilderTy::InsertPointGuard Guard(Builder);
// If the pattern included (x * y), we'll want to insert new instructions
@@ -5377,7 +5376,7 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
ConstantExpr::getNeg(RHSC));
}
- if (Instruction * R = foldICmpXorXX(I, Q, *this))
+ if (Instruction *R = foldICmpXorXX(I, Q, *this))
return R;
if (Instruction *R = foldICmpOrXX(I, Q, *this))
return R;
@@ -5914,8 +5913,8 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
if (ShAmt < TypeBits && ShAmt != 0) {
Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
- Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
- I.getName() + ".mask");
+ Value *And =
+ Builder.CreateAnd(Xor, Builder.getInt(AndVal), I.getName() + ".mask");
return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
}
}
@@ -5947,10 +5946,10 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
if (Instruction *ICmp = foldICmpIntrinsicWithIntrinsic(I, Builder))
return ICmp;
- // Match icmp eq (trunc (lshr A, BW), (ashr (trunc A), BW-1)), which checks the
- // top BW/2 + 1 bits are all the same. Create "A >=s INT_MIN && A <=s INT_MAX",
- // which we generate as "icmp ult (add A, 2^(BW-1)), 2^BW" to skip a few steps
- // of instcombine.
+ // Match icmp eq (trunc (lshr A, BW), (ashr (trunc A), BW-1)), which checks
+ // the top BW/2 + 1 bits are all the same. Create "A >=s INT_MIN && A <=s
+ // INT_MAX", which we generate as "icmp ult (add A, 2^(BW-1)), 2^BW" to skip a
+ // few steps of instcombine.
unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
if (match(Op0, m_AShr(m_Trunc(m_Value(A)), m_SpecificInt(BitWidth - 1))) &&
match(Op1, m_Trunc(m_LShr(m_Specific(A), m_SpecificInt(BitWidth)))) &&
@@ -6084,10 +6083,10 @@ Instruction *InstCombinerImpl::foldICmpWithZextOrSext(ICmpInst &ICmp) {
bool IsZext1 = isa<ZExtInst>(ICmp.getOperand(1));
if (IsZext0 != IsZext1) {
- // If X and Y and both i1
- // (icmp eq/ne (zext X) (sext Y))
- // eq -> (icmp eq (or X, Y), 0)
- // ne -> (icmp ne (or X, Y), 0)
+ // If X and Y and both i1
+ // (icmp eq/ne (zext X) (sext Y))
+ // eq -> (icmp eq (or X, Y), 0)
+ // ne -> (icmp ne (or X, Y), 0)
if (ICmp.isEquality() && X->getType()->isIntOrIntVectorTy(1) &&
Y->getType()->isIntOrIntVectorTy(1))
return new ICmpInst(ICmp.getPredicate(), Builder.CreateOr(X, Y),
@@ -6247,16 +6246,17 @@ Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
return foldICmpWithZextOrSext(ICmp);
}
-static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned) {
+static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS,
+ bool IsSigned) {
switch (BinaryOp) {
- default:
- llvm_unreachable("Unsupported binary op");
- case Instruction::Add:
- case Instruction::Sub:
- return match(RHS, m_Zero());
- case Instruction::Mul:
- return !(RHS->getType()->isIntOrIntVectorTy(1) && IsSigned) &&
- match(RHS, m_One());
+ default:
+ llvm_unreachable("Unsupported binary op");
+ case Instruction::Add:
+ case Instruction::Sub:
+ return match(RHS, m_Zero());
+ case Instruction::Mul:
+ return !(RHS->getType()->isIntOrIntVectorTy(1) && IsSigned) &&
+ match(RHS, m_One());
}
}
@@ -6265,23 +6265,23 @@ InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp,
bool IsSigned, Value *LHS, Value *RHS,
Instruction *CxtI) const {
switch (BinaryOp) {
- default:
- llvm_unreachable("Unsupported binary op");
- case Instruction::Add:
- if (IsSigned)
- return computeOverflowForSignedAdd(LHS, RHS, CxtI);
- else
- return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
- case Instruction::Sub:
- if (IsSigned)
- return computeOverflowForSignedSub(LHS, RHS, CxtI);
- else
- return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
- case Instruction::Mul:
- if (IsSigned)
- return computeOverflowForSignedMul(LHS, RHS, CxtI);
- else
- return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
+ default:
+ llvm_unreachable("Unsupported binary op");
+ case Instruction::Add:
+ if (IsSigned)
+ return computeOverflowForSignedAdd(LHS, RHS, CxtI);
+ else
+ return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
+ case Instruction::Sub:
+ if (IsSigned)
+ return computeOverflowForSignedSub(LHS, RHS, CxtI);
+ else
+ return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
+ case Instruction::Mul:
+ if (IsSigned)
+ return computeOverflowForSignedMul(LHS, RHS, CxtI);
+ else
+ return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
}
}
@@ -6310,25 +6310,25 @@ bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,
}
switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
- case OverflowResult::MayOverflow:
- return false;
- case OverflowResult::AlwaysOverflowsLow:
- case OverflowResult::AlwaysOverflowsHigh:
- Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
- Result->takeName(&OrigI);
- Overflow = ConstantInt::getTrue(OverflowTy);
- return true;
- case OverflowResult::NeverOverflows:
- Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
- Result->takeName(&OrigI);
- Overflow = ConstantInt::getFalse(OverflowTy);
- if (auto *Inst = dyn_cast<Instruction>(Result)) {
- if (IsSigned)
- Inst->setHasNoSignedWrap();
- else
- Inst->setHasNoUnsignedWrap();
- }
- return true;
+ case OverflowResult::MayOverflow:
+ return false;
+ case OverflowResult::AlwaysOverflowsLow:
+ case OverflowResult::AlwaysOverflowsHigh:
+ Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
+ Result->takeName(&OrigI);
+ Overflow = ConstantInt::getTrue(OverflowTy);
+ return true;
+ case OverflowResult::NeverOverflows:
+ Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
+ Result->takeName(&OrigI);
+ Overflow = ConstantInt::getFalse(OverflowTy);
+ if (auto *Inst = dyn_cast<Instruction>(Result)) {
+ if (IsSigned)
+ Inst->setHasNoSignedWrap();
+ else
+ Inst->setHasNoUnsignedWrap();
+ }
+ return true;
}
llvm_unreachable("Unexpected overflow result");
@@ -6993,21 +6993,21 @@ static Instruction *canonicalizeICmpBool(ICmpInst &I,
// Cases not handled by InstSimplify are always 'not' of Op0.
if (match(B, m_Zero())) {
switch (I.getPredicate()) {
- case CmpInst::ICMP_EQ: // A == 0 -> !A
- case CmpInst::ICMP_ULE: // A <=u 0 -> !A
- case CmpInst::ICMP_SGE: // A >=s 0 -> !A
- return BinaryOperator::CreateNot(A);
- default:
- llvm_unreachable("ICmp i1 X, C not simplified as expected.");
+ case CmpInst::ICMP_EQ: // A == 0 -> !A
+ case CmpInst::ICMP_ULE: // A <=u 0 -> !A
+ case CmpInst::ICMP_SGE: // A >=s 0 -> !A
+ return BinaryOperator::CreateNot(A);
+ default:
+ llvm_unreachable("ICmp i1 X, C not simplified as expected.");
}
} else if (match(B, m_One())) {
switch (I.getPredicate()) {
- case CmpInst::ICMP_NE: // A != 1 -> !A
- case CmpInst::ICMP_ULT: // A <u 1 -> !A
- case CmpInst::ICMP_SGT: // A >s -1 -> !A
- return BinaryOperator::CreateNot(A);
- default:
- llvm_unreachable("ICmp i1 X, C not simplified as expected.");
+ case CmpInst::ICMP_NE: // A != 1 -> !A
+ case CmpInst::ICMP_ULT: // A <u 1 -> !A
+ case CmpInst::ICMP_SGT: // A >s -1 -> !A
+ return BinaryOperator::CreateNot(A);
+ default:
+ llvm_unreachable("ICmp i1 X, C not simplified as expected.");
}
}
@@ -7193,8 +7193,8 @@ static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
// extract(uadd.with.overflow(A, 1), 0) == 0
// extract(uadd.with.overflow(A, -1), 0) != -1
UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
- else if (match(Op1, UAddOvResultPat) &&
- Pred == ICmpInst::ICMP_UGT && (Op0 == A || Op0 == B))
+ else if (match(Op1, UAddOvResultPat) && Pred == ICmpInst::ICMP_UGT &&
+ (Op0 == A || Op0 == B))
// A > extract(uadd.with.overflow(A, B), 0)
UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
else
@@ -7410,8 +7410,7 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
if (Value *V = dyn_castNegVal(SelectTrue)) {
if (V == SelectFalse)
return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
- }
- else if (Value *V = dyn_castNegVal(SelectFalse)) {
+ } else if (Value *V = dyn_castNegVal(SelectFalse)) {
if (V == SelectTrue)
return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
}
@@ -7552,7 +7551,8 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
// Try to optimize equality comparisons against alloca-based pointers.
if (Op0->getType()->isPointerTy() && I.isEquality()) {
- assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
+ assert(Op1->getType()->isPointerTy() &&
+ "Comparing pointer with non-pointer?");
if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
if (foldAllocaCmp(Alloca))
return nullptr;
@@ -7722,7 +7722,8 @@ Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
// Get the width of the mantissa. We don't want to hack on conversions that
// might lose information from the integer, e.g. "i64 -> float"
int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
- if (MantissaWidth == -1) return nullptr; // Unknown.
+ if (MantissaWidth == -1)
+ return nullptr; // Unknown.
Type *IntTy = LHSI->getOperand(0)->getType();
unsigned IntWidth = IntTy->getScalarSizeInBits();
@@ -7784,7 +7785,8 @@ Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
ICmpInst::Predicate Pred;
switch (I.getPredicate()) {
- default: llvm_unreachable("Unexpected predicate!");
+ default:
+ llvm_unreachable("Unexpected predicate!");
case FCmpInst::FCMP_UEQ:
case FCmpInst::FCMP_OEQ:
Pred = ICmpInst::ICMP_EQ;
@@ -7826,7 +7828,7 @@ Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
APFloat::rmNearestTiesToEven);
if (SMax < *RHS) { // smax < 13123.0
- if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
+ if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
Pred == ICmpInst::ICMP_SLE)
return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
@@ -7838,7 +7840,7 @@ Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
APFloat::rmNearestTiesToEven);
if (UMax < *RHS) { // umax < 13123.0
- if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
+ if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
Pred == ICmpInst::ICMP_ULE)
return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
@@ -7882,10 +7884,11 @@ Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
// the compare predicate and sometimes the value. RHSC is rounded towards
// zero at this point.
switch (Pred) {
- default: llvm_unreachable("Unexpected integer comparison!");
- case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
+ default:
+ llvm_unreachable("Unexpected integer comparison!");
+ case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
- case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
+ case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
case ICmpInst::ICMP_ULE:
// (float)int <= 4.4 --> int <= 4
@@ -8313,20 +8316,21 @@ Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
if (Op0 == Op1) {
switch (Pred) {
- default: break;
- case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
- case FCmpInst::FCMP_ULT: // True if unordered or less than
- case FCmpInst::FCMP_UGT: // True if unordered or greater than
- case FCmpInst::FCMP_UNE: // True if unordered or not equal
+ default:
+ break;
+ case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
+ case FCmpInst::FCMP_ULT: // True if unordered or less than
+ case FCmpInst::FCMP_UGT: // True if unordered or greater than
+ case FCmpInst::FCMP_UNE: // True if unordered or not equal
// Canonicalize these to be 'fcmp uno %X, 0.0'.
I.setPredicate(FCmpInst::FCMP_UNO);
I.setOperand(1, Constant::getNullValue(OpType));
return &I;
- case FCmpInst::FCMP_ORD: // True if ordered (no nans)
- case FCmpInst::FCMP_OEQ: // True if ordered and equal
- case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
- case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
+ case FCmpInst::FCMP_ORD: // True if ordered (no nans)
+ case FCmpInst::FCMP_OEQ: // True if ordered and equal
+ case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
+ case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
// Canonicalize these to be 'fcmp ord %X, 0.0'.
I.setPredicate(FCmpInst::FCMP_ORD);
I.setOperand(1, Constant::getNullValue(OpType));
@@ -8484,7 +8488,7 @@ Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
cast<LoadInst>(LHSI), GEP, GV, I))
return Res;
break;
- }
+ }
}
if (Instruction *R = foldFabsWithFcmpZero(I, *this))
diff --git a/llvm/test/Transforms/InstCombine/and-compare.ll b/llvm/test/Transforms/InstCombine/and-compare.ll
index c8f93410f9e9c8f..3a59aca0fa8e134 100644
--- a/llvm/test/Transforms/InstCombine/and-compare.ll
+++ b/llvm/test/Transforms/InstCombine/and-compare.ll
@@ -256,3 +256,27 @@ entry:
%cmp = icmp ne i8 %and, 11
ret i1 %cmp
}
+
+define i1 @test_eq_16_and_15_add_10(i8 %a) {
+; CHECK-LABEL: @test_eq_16_and_15_add_10(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %add = add i8 %a, 10
+ %and = and i8 %add, 15
+ %cmp = icmp eq i8 %and, 16
+ ret i1 %cmp
+}
+
+define i1 @test_ne_16_and_15_add_10(i8 %a) {
+; CHECK-LABEL: @test_ne_16_and_15_add_10(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: ret i1 true
+;
+entry:
+ %add = add i8 %a, 10
+ %and = and i8 %add, 15
+ %cmp = icmp ne i8 %and, 16
+ ret i1 %cmp
+}
More information about the llvm-commits
mailing list