[llvm] r276312 - make InstCombine compare helper functions private; NFC
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 21 11:07:40 PDT 2016
Author: spatel
Date: Thu Jul 21 13:07:40 2016
New Revision: 276312
URL: http://llvm.org/viewvc/llvm-project?rev=276312&view=rev
Log:
make InstCombine compare helper functions private; NFC
Also, rename some of them for consistency and to follow current conventions.
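For reference, the "current conventions" mentioned above appear to be the LLVM Coding Standards rule that function names start with a lowercase letter; several names also spell out "Const" in place of the older "Cst" abbreviation. A minimal before/after sketch of one declaration, taken from the diff below (illustrative only, not a complete list of the renames):

  // Old: capitalized initial and abbreviated "Cst".
  Instruction *FoldICmpAddOpCst(Instruction &ICI, Value *X, ConstantInt *CI,
                                ICmpInst::Predicate Pred);

  // New: lowercase initial and spelled-out "Const"; the declaration also
  // moves from the public to the private section of InstCombiner.
  Instruction *foldICmpAddOpConst(Instruction &ICI, Value *X, ConstantInt *CI,
                                  ICmpInst::Predicate Pred);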
Modified:
llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp?rev=276312&r1=276311&r2=276312&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp Thu Jul 21 13:07:40 2016
@@ -231,9 +231,10 @@ static void ComputeUnsignedMinMaxValuesF
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
-Instruction *InstCombiner::
-FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
- CmpInst &ICI, ConstantInt *AndCst) {
+Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
+ GlobalVariable *GV,
+ CmpInst &ICI,
+ ConstantInt *AndCst) {
Constant *Init = GV->getInitializer();
if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
return nullptr;
@@ -919,7 +920,7 @@ static Instruction *transformToIndexedCo
/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
-Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
+Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
ICmpInst::Predicate Cond,
Instruction &I) {
// Don't transform signed compares of GEPs into index compares. Even if the
@@ -1003,12 +1004,12 @@ Instruction *InstCombiner::FoldGEPICmp(G
// If one of the GEPs has all zero indices, recurse.
if (GEPLHS->hasAllZeroIndices())
- return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
+ return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
ICmpInst::getSwappedPredicate(Cond), I);
// If the other GEP has all zero indices, recurse.
if (GEPRHS->hasAllZeroIndices())
- return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
+ return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
@@ -1056,7 +1057,7 @@ Instruction *InstCombiner::FoldGEPICmp(G
return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}
-Instruction *InstCombiner::FoldAllocaCmp(ICmpInst &ICI, AllocaInst *Alloca,
+Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI, AllocaInst *Alloca,
Value *Other) {
assert(ICI.isEquality() && "Cannot fold non-equality comparison.");
@@ -1134,9 +1135,9 @@ Instruction *InstCombiner::FoldAllocaCmp
}
/// Fold "icmp pred (X+CI), X".
-Instruction *InstCombiner::FoldICmpAddOpCst(Instruction &ICI,
- Value *X, ConstantInt *CI,
- ICmpInst::Predicate Pred) {
+Instruction *InstCombiner::foldICmpAddOpConst(Instruction &ICI,
+ Value *X, ConstantInt *CI,
+ ICmpInst::Predicate Pred) {
// From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
// so the values can never be equal. Similarly for all other "or equals"
// operators.
@@ -1183,8 +1184,8 @@ Instruction *InstCombiner::FoldICmpAddOp
/// Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS and CmpRHS are
/// both known to be integer constants.
-Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
- ConstantInt *DivRHS) {
+Instruction *InstCombiner::foldICmpDivConst(ICmpInst &ICI, BinaryOperator *DivI,
+ ConstantInt *DivRHS) {
ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
const APInt &CmpRHSV = CmpRHS->getValue();
@@ -1339,8 +1340,8 @@ Instruction *InstCombiner::FoldICmpDivCs
}
/// Handle "icmp(([al]shr X, cst1), cst2)".
-Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
- ConstantInt *ShAmt) {
+Instruction *InstCombiner::foldICmpShrConst(ICmpInst &ICI, BinaryOperator *Shr,
+ ConstantInt *ShAmt) {
const APInt &CmpRHSV = cast<ConstantInt>(ICI.getOperand(1))->getValue();
// Check that the shift amount is in range. If not, don't perform
@@ -1386,7 +1387,7 @@ Instruction *InstCombiner::FoldICmpShrCs
assert(TheDiv->getOpcode() == Instruction::SDiv ||
TheDiv->getOpcode() == Instruction::UDiv);
- Instruction *Res = FoldICmpDivCst(ICI, TheDiv, cast<ConstantInt>(DivCst));
+ Instruction *Res = foldICmpDivConst(ICI, TheDiv, cast<ConstantInt>(DivCst));
assert(Res && "This div/cst should have folded!");
return Res;
}
@@ -1427,7 +1428,7 @@ Instruction *InstCombiner::FoldICmpShrCs
/// Handle "(icmp eq/ne (ashr/lshr const2, A), const1)" ->
/// (icmp eq/ne A, Log2(const2/const1)) ->
/// (icmp eq/ne A, Log2(const2) - Log2(const1)).
-Instruction *InstCombiner::FoldICmpCstShrCst(ICmpInst &I, Value *Op, Value *A,
+Instruction *InstCombiner::foldICmpCstShrConst(ICmpInst &I, Value *Op, Value *A,
ConstantInt *CI1,
ConstantInt *CI2) {
assert(I.isEquality() && "Cannot fold icmp gt/lt");
@@ -1491,9 +1492,9 @@ Instruction *InstCombiner::FoldICmpCstSh
/// Handle "(icmp eq/ne (shl const2, A), const1)" ->
/// (icmp eq/ne A, TrailingZeros(const1) - TrailingZeros(const2)).
-Instruction *InstCombiner::FoldICmpCstShlCst(ICmpInst &I, Value *Op, Value *A,
- ConstantInt *CI1,
- ConstantInt *CI2) {
+Instruction *InstCombiner::foldICmpCstShlConst(ICmpInst &I, Value *Op, Value *A,
+ ConstantInt *CI1,
+ ConstantInt *CI2) {
assert(I.isEquality() && "Cannot fold icmp gt/lt");
auto getConstant = [&I, this](bool IsTrue) {
@@ -1535,9 +1536,9 @@ Instruction *InstCombiner::FoldICmpCstSh
}
/// Handle "icmp (instr, intcst)".
-Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
- Instruction *LHSI,
- ConstantInt *RHS) {
+Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &ICI,
+ Instruction *LHSI,
+ ConstantInt *RHS) {
const APInt &RHSV = RHS->getValue();
switch (LHSI->getOpcode()) {
@@ -1839,7 +1840,7 @@ Instruction *InstCombiner::visitICmpInst
if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
!LI->isVolatile() && isa<ConstantInt>(LHSI->getOperand(1))) {
ConstantInt *C = cast<ConstantInt>(LHSI->getOperand(1));
- if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV,ICI, C))
+ if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV,ICI, C))
return Res;
}
}
@@ -2077,7 +2078,7 @@ Instruction *InstCombiner::visitICmpInst
// Handle equality comparisons of shift-by-constant.
BinaryOperator *BO = cast<BinaryOperator>(LHSI);
if (ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
- if (Instruction *Res = FoldICmpShrCst(ICI, BO, ShAmt))
+ if (Instruction *Res = foldICmpShrConst(ICI, BO, ShAmt))
return Res;
}
@@ -2118,7 +2119,7 @@ Instruction *InstCombiner::visitICmpInst
// it, otherwise compute the range [low, hi) bounding the new value.
// See: InsertRangeTest above for the kinds of replacements possible.
if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
- if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
+ if (Instruction *R = foldICmpDivConst(ICI, cast<BinaryOperator>(LHSI),
DivRHS))
return R;
break;
@@ -2200,9 +2201,9 @@ Instruction *InstCombiner::visitICmpInst
}
/// Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
-Instruction *InstCombiner::visitICmpEqualityWithConstant(ICmpInst &ICI,
- Instruction *LHSI,
- ConstantInt *RHS) {
+Instruction *InstCombiner::foldICmpEqualityWithConstant(ICmpInst &ICI,
+ Instruction *LHSI,
+ ConstantInt *RHS) {
if (!ICI.isEquality())
return nullptr;
@@ -2379,7 +2380,7 @@ Instruction *InstCombiner::visitICmpEqua
/// Handle icmp (cast x to y), (cast/cst). We only handle extending casts so
/// far.
-Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICmp) {
+Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) {
const CastInst *LHSCI = cast<CastInst>(ICmp.getOperand(0));
Value *LHSCIOp = LHSCI->getOperand(0);
Type *SrcTy = LHSCIOp->getType();
@@ -3320,12 +3321,12 @@ Instruction *InstCombiner::visitICmpInst
if (match(Op0, m_AShr(m_ConstantInt(CI2), m_Value(A))) ||
match(Op0, m_LShr(m_ConstantInt(CI2), m_Value(A)))) {
// (icmp eq/ne (ashr/lshr const2, A), const1)
- if (Instruction *Inst = FoldICmpCstShrCst(I, Op0, A, CI, CI2))
+ if (Instruction *Inst = foldICmpCstShrConst(I, Op0, A, CI, CI2))
return Inst;
}
if (match(Op0, m_Shl(m_ConstantInt(CI2), m_Value(A)))) {
// (icmp eq/ne (shl const2, A), const1)
- if (Instruction *Inst = FoldICmpCstShlCst(I, Op0, A, CI, CI2))
+ if (Instruction *Inst = foldICmpCstShlConst(I, Op0, A, CI, CI2))
return Inst;
}
}
@@ -3629,9 +3630,9 @@ Instruction *InstCombiner::visitICmpInst
// instruction, see if that instruction also has constants so that the
// instruction can be folded into the icmp
if (Instruction *LHSI = dyn_cast<Instruction>(Op0)) {
- if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
+ if (Instruction *Res = foldICmpWithConstant(I, LHSI, CI))
return Res;
- if (Instruction *Res = visitICmpEqualityWithConstant(I, LHSI, CI))
+ if (Instruction *Res = foldICmpEqualityWithConstant(I, LHSI, CI))
return Res;
}
// (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
@@ -3726,7 +3727,7 @@ Instruction *InstCombiner::visitICmpInst
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
!cast<LoadInst>(LHSI)->isVolatile())
- if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
+ if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
return Res;
}
break;
@@ -3735,10 +3736,10 @@ Instruction *InstCombiner::visitICmpInst
// If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
- if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
+ if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
return NI;
if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
- if (Instruction *NI = FoldGEPICmp(GEP, Op0,
+ if (Instruction *NI = foldGEPICmp(GEP, Op0,
ICmpInst::getSwappedPredicate(I.getPredicate()), I))
return NI;
@@ -3746,10 +3747,10 @@ Instruction *InstCombiner::visitICmpInst
if (Op0->getType()->isPointerTy() && I.isEquality()) {
assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
- if (Instruction *New = FoldAllocaCmp(I, Alloca, Op1))
+ if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
return New;
if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
- if (Instruction *New = FoldAllocaCmp(I, Alloca, Op0))
+ if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
return New;
}
@@ -3789,7 +3790,7 @@ Instruction *InstCombiner::visitICmpInst
// For generality, we handle any zero-extension of any operand comparison
// with a constant or another cast from the same type.
if (isa<Constant>(Op1) || isa<CastInst>(Op1))
- if (Instruction *R = visitICmpInstWithCastAndCast(I))
+ if (Instruction *R = foldICmpWithCastAndCast(I))
return R;
}
@@ -4293,18 +4294,17 @@ Instruction *InstCombiner::visitICmpInst
Value *X; ConstantInt *Cst;
// icmp X+Cst, X
if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
- return FoldICmpAddOpCst(I, X, Cst, I.getPredicate());
+ return foldICmpAddOpConst(I, X, Cst, I.getPredicate());
// icmp X, X+Cst
if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
- return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate());
+ return foldICmpAddOpConst(I, X, Cst, I.getSwappedPredicate());
}
return Changed ? &I : nullptr;
}
/// Fold fcmp ([us]itofp x, cst) if possible.
-Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
- Instruction *LHSI,
+Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
Constant *RHSC) {
if (!isa<ConstantFP>(RHSC)) return nullptr;
const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
@@ -4650,7 +4650,7 @@ Instruction *InstCombiner::visitFCmpInst
break;
case Instruction::SIToFP:
case Instruction::UIToFP:
- if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
+ if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
return NV;
break;
case Instruction::FSub: {
@@ -4667,7 +4667,7 @@ Instruction *InstCombiner::visitFCmpInst
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
!cast<LoadInst>(LHSI)->isVolatile())
- if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
+ if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
return Res;
}
break;
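For context on the first helper touched above: foldCmpLoadFromIndexedGlobal folds a comparison of a value loaded from a constant global array into something cheaper, typically a test on the index itself, and the optional AndCst parameter covers masked forms such as "A[i]&4 == 0" (see the doc comment near the top of the file). Below is a hedged, source-level sketch of the kind of pattern it targets; the names are hypothetical and the exact folded form the pass produces may differ.

  // Hypothetical example, written at the C++ level rather than as LLVM IR.
  static const int Table[4] = {0, 4, 0, 4};

  bool hasFlag(unsigned Idx) {
    // A load from the constant array compared against a constant can be
    // reduced to a check on the index alone: here only odd indices hold 4,
    // so this is conceptually (Idx & 1) != 0 once the fold fires.
    return Table[Idx] == 4;
  }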
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h?rev=276312&r1=276311&r2=276312&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h Thu Jul 21 13:07:40 2016
@@ -262,31 +262,8 @@ public:
Instruction *visitAShr(BinaryOperator &I);
Instruction *visitLShr(BinaryOperator &I);
Instruction *commonShiftTransforms(BinaryOperator &I);
- Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
- Constant *RHSC);
- Instruction *FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
- GlobalVariable *GV, CmpInst &ICI,
- ConstantInt *AndCst = nullptr);
Instruction *visitFCmpInst(FCmpInst &I);
Instruction *visitICmpInst(ICmpInst &I);
- Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
- Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI, Instruction *LHS,
- ConstantInt *RHS);
- Instruction *visitICmpEqualityWithConstant(ICmpInst &ICI, Instruction *LHS,
- ConstantInt *RHS);
- Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
- ConstantInt *DivRHS);
- Instruction *FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *DivI,
- ConstantInt *DivRHS);
- Instruction *FoldICmpCstShrCst(ICmpInst &I, Value *Op, Value *A,
- ConstantInt *CI1, ConstantInt *CI2);
- Instruction *FoldICmpCstShlCst(ICmpInst &I, Value *Op, Value *A,
- ConstantInt *CI1, ConstantInt *CI2);
- Instruction *FoldICmpAddOpCst(Instruction &ICI, Value *X, ConstantInt *CI,
- ICmpInst::Predicate Pred);
- Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
- ICmpInst::Predicate Cond, Instruction &I);
- Instruction *FoldAllocaCmp(ICmpInst &ICI, AllocaInst *Alloca, Value *Other);
Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
BinaryOperator &I);
Instruction *commonCastTransforms(CastInst &CI);
@@ -585,6 +562,30 @@ private:
Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
Instruction *FoldPHIArgZextsIntoPHI(PHINode &PN);
+ Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
+ ICmpInst::Predicate Cond, Instruction &I);
+ Instruction *foldAllocaCmp(ICmpInst &ICI, AllocaInst *Alloca, Value *Other);
+ Instruction *foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
+ GlobalVariable *GV, CmpInst &ICI,
+ ConstantInt *AndCst = nullptr);
+ Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
+ Constant *RHSC);
+ Instruction *foldICmpDivConst(ICmpInst &ICI, BinaryOperator *DivI,
+ ConstantInt *DivRHS);
+ Instruction *foldICmpShrConst(ICmpInst &ICI, BinaryOperator *DivI,
+ ConstantInt *DivRHS);
+ Instruction *foldICmpCstShrConst(ICmpInst &I, Value *Op, Value *A,
+ ConstantInt *CI1, ConstantInt *CI2);
+ Instruction *foldICmpCstShlConst(ICmpInst &I, Value *Op, Value *A,
+ ConstantInt *CI1, ConstantInt *CI2);
+ Instruction *foldICmpAddOpConst(Instruction &ICI, Value *X, ConstantInt *CI,
+ ICmpInst::Predicate Pred);
+ Instruction *foldICmpWithCastAndCast(ICmpInst &ICI);
+ Instruction *foldICmpWithConstant(ICmpInst &ICI, Instruction *LHS,
+ ConstantInt *RHS);
+ Instruction *foldICmpEqualityWithConstant(ICmpInst &ICI, Instruction *LHS,
+ ConstantInt *RHS);
+
Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
ConstantInt *AndRHS, BinaryOperator &TheAnd);
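To put the header change in context: after this patch the compare helpers are reachable only through the public visitors. A heavily abridged sketch of the resulting shape of InstCombiner in InstCombineInternal.h follows; the real class declares many more members, and the usual LLVM headers are assumed.

  class InstCombiner /* : public InstVisitor<InstCombiner, Instruction *> */ {
  public:
    // The InstVisitor entry points remain public.
    Instruction *visitICmpInst(ICmpInst &I);
    Instruction *visitFCmpInst(FCmpInst &I);

  private:
    // The renamed fold* helpers are now implementation details of the
    // visitors above; code outside the class can no longer call them.
    Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                             ICmpInst::Predicate Cond, Instruction &I);
    Instruction *foldAllocaCmp(ICmpInst &ICI, AllocaInst *Alloca, Value *Other);
    Instruction *foldICmpWithConstant(ICmpInst &ICI, Instruction *LHS,
                                      ConstantInt *RHS);
    // ...plus the remaining helpers listed in the hunk above.
  };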