[llvm] r316503 - [Transforms] Fix some Clang-tidy modernize and Include What You Use warnings; other minor fixes (NFC).
Eugene Zelenko via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 24 14:24:54 PDT 2017
Author: eugenezelenko
Date: Tue Oct 24 14:24:53 2017
New Revision: 316503
URL: http://llvm.org/viewvc/llvm-project?rev=316503&view=rev
Log:
[Transforms] Fix some Clang-tidy modernize and Include What You Use warnings; other minor fixes (NFC).
Modified:
llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp
llvm/trunk/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
llvm/trunk/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp?rev=316503&r1=316502&r2=316503&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp Tue Oct 24 14:24:53 2017
@@ -12,12 +12,26 @@
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
+#include <cassert>
+#include <utility>
using namespace llvm;
using namespace PatternMatch;
@@ -39,10 +53,15 @@ namespace {
// is expensive. In order to avoid the cost of the constructor, we should
// reuse some instances whenever possible. The pre-created instances
// FAddCombine::Add[0-5] embody this idea.
- //
- FAddendCoef() : IsFp(false), BufHasFpVal(false), IntVal(0) {}
+ FAddendCoef() = default;
~FAddendCoef();
+ // If possible, don't define operator+/operator- etc because these
+ // operators inevitably call FAddendCoef's constructor which is not cheap.
+ void operator=(const FAddendCoef &A);
+ void operator+=(const FAddendCoef &A);
+ void operator*=(const FAddendCoef &S);
+
void set(short C) {
assert(!insaneIntVal(C) && "Insane coefficient");
IsFp = false; IntVal = C;
@@ -55,12 +74,6 @@ namespace {
bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
Value *getValue(Type *) const;
- // If possible, don't define operator+/operator- etc because these
- // operators inevitably call FAddendCoef's constructor which is not cheap.
- void operator=(const FAddendCoef &A);
- void operator+=(const FAddendCoef &A);
- void operator*=(const FAddendCoef &S);
-
bool isOne() const { return isInt() && IntVal == 1; }
bool isTwo() const { return isInt() && IntVal == 2; }
bool isMinusOne() const { return isInt() && IntVal == -1; }
@@ -68,10 +81,12 @@ namespace {
private:
bool insaneIntVal(int V) { return V > 4 || V < -4; }
+
APFloat *getFpValPtr()
- { return reinterpret_cast<APFloat*>(&FpValBuf.buffer[0]); }
+ { return reinterpret_cast<APFloat *>(&FpValBuf.buffer[0]); }
+
const APFloat *getFpValPtr() const
- { return reinterpret_cast<const APFloat*>(&FpValBuf.buffer[0]); }
+ { return reinterpret_cast<const APFloat *>(&FpValBuf.buffer[0]); }
const APFloat &getFpVal() const {
assert(IsFp && BufHasFpVal && "Incorrect state");
@@ -94,17 +109,16 @@ namespace {
// from a *SIGNED* integer.
APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);
- private:
- bool IsFp;
+ bool IsFp = false;
// True iff FpValBuf contains an instance of APFloat.
- bool BufHasFpVal;
+ bool BufHasFpVal = false;
// The integer coefficient of an individual addend is either 1 or -1,
// and we try to simplify at most 4 addends from at most two neighboring
// instructions. So the range of <IntVal> falls in [-4, 4]; APInt
// would be overkill for this purpose.
- short IntVal;
+ short IntVal = 0;
AlignedCharArrayUnion<APFloat> FpValBuf;
};
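
The pattern in the hunk above recurs throughout this commit: hand-written constructors that only zero out members are replaced by C++11 in-class default member initializers plus a defaulted constructor, as suggested by clang-tidy's modernize-use-default-member-init and modernize-use-equals-default checks. A minimal standalone sketch of the before/after shape (struct and member names here are illustrative, not LLVM's):

  // Before: initialization logic lives in a hand-written constructor.
  struct CoefBefore {
    CoefBefore() : IsFp(false), BufHasFpVal(false), IntVal(0) {}
    bool IsFp;
    bool BufHasFpVal;
    short IntVal;
  };

  // After: the defaults live next to the members and the constructor
  // can be defaulted; any future constructor picks the defaults up too.
  struct CoefAfter {
    CoefAfter() = default;
    bool IsFp = false;
    bool BufHasFpVal = false;
    short IntVal = 0;
  };
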
@@ -112,10 +126,14 @@ namespace {
/// FAddend is used to represent floating-point addend. An addend is
/// represented as <C, V>, where the V is a symbolic value, and C is a
/// constant coefficient. A constant addend is represented as <C, 0>.
- ///
class FAddend {
public:
- FAddend() : Val(nullptr) {}
+ FAddend() = default;
+
+ void operator+=(const FAddend &T) {
+ assert((Val == T.Val) && "Symbolic-values disagree");
+ Coeff += T.Coeff;
+ }
Value *getSymVal() const { return Val; }
const FAddendCoef &getCoef() const { return Coeff; }
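
FAddend, per the comment above, models an addend as <C, V>: a constant coefficient C attached to a symbolic value V, with operator+= merging two addends over the same V. A hypothetical miniature of that idea, with simple standard-library types standing in for FAddendCoef/FAddend:

  #include <map>
  #include <string>

  // Collapse addends sharing a symbolic value by summing coefficients, so
  // <2.0, x> + <3.0, x> + <1.0, y> becomes <5.0, x> + <1.0, y>.
  std::map<std::string, double>
  foldAddends(const std::map<std::string, double> &In1,
              const std::map<std::string, double> &In2) {
    std::map<std::string, double> Out = In1;
    for (const auto &A : In2)
      Out[A.first] += A.second; // += on the coefficient, as in FAddend
    return Out;
  }
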
@@ -146,16 +164,11 @@ namespace {
/// split is the addend itself.
unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;
- void operator+=(const FAddend &T) {
- assert((Val == T.Val) && "Symbolic-values disagree");
- Coeff += T.Coeff;
- }
-
private:
void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }
// This addend has the value of "Coeff * Val".
- Value *Val;
+ Value *Val = nullptr;
FAddendCoef Coeff;
};
@@ -164,11 +177,12 @@ namespace {
///
class FAddCombine {
public:
- FAddCombine(InstCombiner::BuilderTy &B) : Builder(B), Instr(nullptr) {}
+ FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}
+
Value *simplify(Instruction *FAdd);
private:
- typedef SmallVector<const FAddend*, 4> AddendVect;
+ using AddendVect = SmallVector<const FAddend *, 4>;
Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);
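
The typedef-to-using change above (clang-tidy's modernize-use-using) is purely syntactic, but the alias-declaration form reads left to right and, unlike typedef, also works for alias templates. A small illustration with made-up alias names:

  #include "llvm/ADT/SmallVector.h"

  // Before: the alias name is buried inside the declarator.
  typedef llvm::SmallVector<const int *, 4> PtrVectOld;
  // After: name first, type second.
  using PtrVectNew = llvm::SmallVector<const int *, 4>;
  // Only the using form supports alias templates:
  template <typename T> using VectOf = llvm::SmallVector<T, 4>;
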
@@ -179,6 +193,7 @@ namespace {
/// Return the number of instructions needed to emit the N-ary addition.
unsigned calcInstrNumber(const AddendVect& Vect);
+
Value *createFSub(Value *Opnd0, Value *Opnd1);
Value *createFAdd(Value *Opnd0, Value *Opnd1);
Value *createFMul(Value *Opnd0, Value *Opnd1);
@@ -187,9 +202,6 @@ namespace {
Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
void createInstPostProc(Instruction *NewInst, bool NoNumber = false);
- InstCombiner::BuilderTy &Builder;
- Instruction *Instr;
-
// Debugging stuff is clustered here.
#ifndef NDEBUG
unsigned CreateInstrNum;
@@ -199,9 +211,12 @@ namespace {
void initCreateInstNum() {}
void incCreateInstNum() {}
#endif
+
+ InstCombiner::BuilderTy &Builder;
+ Instruction *Instr = nullptr;
};
-} // anonymous namespace
+} // end anonymous namespace
//===----------------------------------------------------------------------===//
//
@@ -332,7 +347,6 @@ Value *FAddendCoef::getValue(Type *Ty) c
// 0 +/- 0 <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
-//
unsigned FAddend::drillValueDownOneStep
(Value *Val, FAddend &Addend0, FAddend &Addend1) {
Instruction *I = nullptr;
@@ -396,7 +410,6 @@ unsigned FAddend::drillValueDownOneStep
// Try to break *this* addend into two addends. e.g. Suppose this addend is
// <2.3, V>, and V = X + Y, by calling this function, we obtain two addends,
// i.e. <2.3, X> and <2.3, Y>.
-//
unsigned FAddend::drillAddendDownOneStep
(FAddend &Addend0, FAddend &Addend1) const {
if (isConstant())
@@ -421,7 +434,6 @@ unsigned FAddend::drillAddendDownOneStep
// -------------------------------------------------------
// (x * y) +/- (x * z) x * (y +/- z)
// (y / x) +/- (z / x) (y +/- z) / x
-//
Value *FAddCombine::performFactorization(Instruction *I) {
assert((I->getOpcode() == Instruction::FAdd ||
I->getOpcode() == Instruction::FSub) && "Expect add/sub");
@@ -447,7 +459,6 @@ Value *FAddCombine::performFactorization
// ----------------------------------------------
// (x*y) +/- (x*z) x y z
// (y/x) +/- (z/x) x y z
- //
Value *Factor = nullptr;
Value *AddSub0 = nullptr, *AddSub1 = nullptr;
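
performFactorization applies the distributive-law rewrites tabulated above, e.g. (x*y) + (x*z) ==> x*(y+z) and (y/x) + (z/x) ==> (y+z)/x. For floating point these are not exact identities (rounding can differ), so the combiner only performs them under relaxed FP semantics; a sketch of what the rewrite does at the source level (illustrative C++, not the pass's IR):

  // Conceptually what the factorization rewrites; the pass operates on
  // LLVM IR carrying fast-math flags, not on C++ source.
  double before(double x, double y, double z) { return x * y + x * z; }
  double after(double x, double y, double z) { return x * (y + z); }
  // The two can differ in the last ULP for some inputs, which is why
  // the fold is gated on unsafe-algebra/fast-math.
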
@@ -599,7 +610,6 @@ Value *FAddCombine::simplifyFAdd(AddendV
// desirable to reside at the top of the resulting expression tree. Placing
// constants close to super-expr(s) will potentially reveal some optimization
// opportunities in super-expr(s).
- //
const FAddend *ConstAdd = nullptr;
// Simplified addends are placed <SimpVect>.
@@ -608,7 +618,6 @@ Value *FAddCombine::simplifyFAdd(AddendV
// The outer loop works on one symbolic-value at a time. Suppose the input
// addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
// The symbolic-values will be processed in this order: x, y, z.
- //
for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {
const FAddend *ThisAddend = Addends[SymIdx];
@@ -626,7 +635,6 @@ Value *FAddCombine::simplifyFAdd(AddendV
// example, if the symbolic value "y" is being processed, the inner loop
// will collect two addends "<b1,y>" and "<b2,y>". These two addends will
// be later on folded into "<b1+b2, y>".
- //
for (unsigned SameSymIdx = SymIdx + 1;
SameSymIdx < AddendNum; SameSymIdx++) {
const FAddend *T = Addends[SameSymIdx];
@@ -681,7 +689,7 @@ Value *FAddCombine::createNaryFAdd
assert(!Opnds.empty() && "Expect at least one addend");
// Step 1: Check if the # of instructions needed exceeds the quota.
- //
+
unsigned InstrNeeded = calcInstrNumber(Opnds);
if (InstrNeeded > InstrQuota)
return nullptr;
@@ -726,10 +734,10 @@ Value *FAddCombine::createNaryFAdd
LastVal = createFNeg(LastVal);
}
- #ifndef NDEBUG
- assert(CreateInstrNum == InstrNeeded &&
- "Inconsistent in instruction numbers");
- #endif
+#ifndef NDEBUG
+ assert(CreateInstrNum == InstrNeeded &&
+ "Inconsistent in instruction numbers");
+#endif
return LastVal;
}
@@ -1034,7 +1042,7 @@ Instruction *InstCombiner::visitAdd(Bina
SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V);
- // (A*B)+(A*C) -> A*(B+C) etc
+ // (A*B)+(A*C) -> A*(B+C) etc
if (Value *V = SimplifyUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V);
@@ -1389,7 +1397,6 @@ Instruction *InstCombiner::visitFAdd(Bin
/// Optimize pointer differences into the same array into a size. Consider:
/// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
-///
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
Type *Ty) {
// If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
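
The doc comment above gives the target pattern: two pointers derived from the same base, where the frontend's GEP/ptrtoint/sub sequence should collapse to a constant. In C terms, a self-contained illustration (the array name is arbitrary):

  #include <cstddef>

  // &A[10] - &A[0] is just the index difference; InstCombine folds the
  // ptrtoint/sub sequence down to the constant 10.
  ptrdiff_t diff() {
    static int A[64];
    return &A[10] - &A[0];
  }
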
@@ -1611,7 +1618,6 @@ Instruction *InstCombiner::visitSub(Bina
Builder.CreateSub(Z, Y, Op1->getName()));
// (X - (X & Y)) --> (X & ~Y)
- //
if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0))))
return BinaryOperator::CreateAnd(Op0,
Builder.CreateNot(Y, Y->getName() + ".not"));
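
The fold kept by the last hunk, (X - (X & Y)) --> (X & ~Y), is a true bitwise identity: X & Y only has bits that X also has, so the subtraction never borrows and simply clears those bits. A quick exhaustive check over 8-bit values:

  #include <cassert>
  #include <cstdint>

  int main() {
    // X & Y is a submask of X, so X - (X & Y) == X & ~Y with no borrows.
    for (unsigned X = 0; X < 256; ++X)
      for (unsigned Y = 0; Y < 256; ++Y)
        assert(uint8_t(X - (X & Y)) == uint8_t(X & ~Y));
    return 0;
  }
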
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp?rev=316503&r1=316502&r2=316503&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp Tue Oct 24 14:24:53 2017
@@ -16,16 +16,20 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
@@ -40,18 +44,26 @@
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
+#include <utility>
#include <vector>
using namespace llvm;
@@ -515,7 +527,7 @@ static Value *simplifyX86varShift(const
// If all elements out of range or UNDEF, return vector of zeros/undefs.
// ArithmeticShift should only hit this if they are all UNDEF.
auto OutOfRange = [&](int Idx) { return (Idx < 0) || (BitWidth <= Idx); };
- if (all_of(ShiftAmts, OutOfRange)) {
+ if (llvm::all_of(ShiftAmts, OutOfRange)) {
SmallVector<Constant *, 8> ConstantVec;
for (int Idx : ShiftAmts) {
if (Idx < 0) {
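
The only change in the hunk above is spelling the call as llvm::all_of, the range-based helper from llvm/ADT/STLExtras.h that wraps std::all_of and takes the whole container rather than an iterator pair. A minimal usage sketch (the function here is hypothetical):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"

  // True iff every element satisfies the predicate; equivalent to
  // std::all_of(Xs.begin(), Xs.end(), Pred).
  bool allNegative(const llvm::SmallVector<int, 8> &Xs) {
    return llvm::all_of(Xs, [](int X) { return X < 0; });
  }
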
@@ -1584,7 +1596,6 @@ static Instruction *SimplifyNVVMIntrinsi
// IntrinsicInstr with target-generic LLVM IR.
const SimplifyAction Action = [II]() -> SimplifyAction {
switch (II->getIntrinsicID()) {
-
// NVVM intrinsics that map directly to LLVM intrinsics.
case Intrinsic::nvvm_ceil_d:
return {Intrinsic::ceil, FTZ_Any};
@@ -2313,11 +2324,10 @@ Instruction *InstCombiner::visitCallInst
case Intrinsic::x86_sse2_pmovmskb_128:
case Intrinsic::x86_avx_movmsk_pd_256:
case Intrinsic::x86_avx_movmsk_ps_256:
- case Intrinsic::x86_avx2_pmovmskb: {
+ case Intrinsic::x86_avx2_pmovmskb:
if (Value *V = simplifyX86movmsk(*II))
return replaceInstUsesWith(*II, V);
break;
- }
case Intrinsic::x86_sse_comieq_ss:
case Intrinsic::x86_sse_comige_ss:
@@ -3371,7 +3381,6 @@ Instruction *InstCombiner::visitCallInst
return II;
break;
-
}
case Intrinsic::amdgcn_fmed3: {
// Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
@@ -3727,7 +3736,6 @@ Instruction *InstCombiner::visitFenceIns
}
// InvokeInst simplification
-//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
return visitCallSite(&II);
}
@@ -3840,7 +3848,6 @@ static IntrinsicInst *findInitTrampoline
// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function. Otherwise return NULL.
-//
static IntrinsicInst *findInitTrampoline(Value *Callee) {
Callee = Callee->stripPointerCasts();
IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
@@ -4008,7 +4015,6 @@ bool InstCombiner::transformConstExprCas
// Okay, this is a cast from a function to a different type. Unless doing so
// would cause a type conversion of one of our arguments, change this call to
// be a direct call with arguments casted to the appropriate types.
- //
FunctionType *FT = Callee->getFunctionType();
Type *OldRetTy = Caller->getType();
Type *NewRetTy = FT->getReturnType();
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h?rev=316503&r1=316502&r2=316503&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineInternal.h Tue Oct 24 14:24:53 2017
@@ -6,42 +6,59 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine.
-///
+//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/Operator.h"
-#include "llvm/IR/PatternMatch.h"
-#include "llvm/Pass.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <cassert>
+#include <cstdint>
#define DEBUG_TYPE "instcombine"
namespace llvm {
+
+class APInt;
+class AssumptionCache;
class CallSite;
class DataLayout;
class DominatorTree;
-class TargetLibraryInfo;
-class MemIntrinsic;
-class MemSetInst;
+class GEPOperator;
+class GlobalVariable;
+class LoopInfo;
class OptimizationRemarkEmitter;
+class TargetLibraryInfo;
+class User;
/// Assign a complexity or rank value to LLVM Values. This is used to reduce
/// the amount of pattern matching needed for compares and commutative
@@ -109,6 +126,7 @@ static inline Value *peekThroughBitcast(
static inline Constant *AddOne(Constant *C) {
return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}
+
/// \brief Subtract one from a Constant
static inline Constant *SubOne(Constant *C) {
return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
@@ -118,7 +136,6 @@ static inline Constant *SubOne(Constant
/// This happens in cases where the ~ can be eliminated. If WillInvertAllUses
/// is true, work under the assumption that the caller intends to remove all
/// uses of V and only keep uses of ~V.
-///
static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
// ~(~(X)) -> X.
if (BinaryOperator::isNot(V))
@@ -161,7 +178,6 @@ static inline bool IsFreeToInvert(Value
return false;
}
-
/// \brief Specific patterns of overflow check idioms that we match.
enum OverflowCheckFlavor {
OCF_UNSIGNED_ADD,
@@ -209,12 +225,13 @@ public:
/// \brief An IRBuilder that automatically inserts new instructions into the
/// worklist.
- typedef IRBuilder<TargetFolder, IRBuilderCallbackInserter> BuilderTy;
+ using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
BuilderTy &Builder;
private:
// Mode in which we are running the combiner.
const bool MinimizeSize;
+
/// Enable combines that trigger rarely but are costly in compiletime.
const bool ExpensiveCombines;
@@ -227,11 +244,12 @@ private:
const DataLayout &DL;
const SimplifyQuery SQ;
OptimizationRemarkEmitter &ORE;
+
// Optional analyses. When non-null, these can both be used to do better
// combining and will be updated to reflect any changes.
LoopInfo *LI;
- bool MadeIRChange;
+ bool MadeIRChange = false;
public:
InstCombiner(InstCombineWorklist &Worklist, BuilderTy &Builder,
@@ -241,7 +259,7 @@ public:
LoopInfo *LI)
: Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT),
- DL(DL), SQ(DL, &TLI, &DT, &AC), ORE(ORE), LI(LI), MadeIRChange(false) {}
+ DL(DL), SQ(DL, &TLI, &DT, &AC), ORE(ORE), LI(LI) {}
/// \brief Run the combiner over the entire worklist until it is empty.
///
@@ -413,27 +431,32 @@ private:
bool DoTransform = true);
Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
+
bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
const Instruction &CxtI) const {
return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
OverflowResult::NeverOverflows;
- };
+ }
+
bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
const Instruction &CxtI) const {
return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
OverflowResult::NeverOverflows;
- };
+ }
+
bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
const Instruction &CxtI) const;
bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
const Instruction &CxtI) const;
bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
const Instruction &CxtI) const;
+
bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
const Instruction &CxtI) const {
return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
OverflowResult::NeverOverflows;
- };
+ }
+
Value *EmitGEPOffset(User *GEP);
Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
@@ -548,6 +571,7 @@ public:
unsigned Depth, const Instruction *CxtI) const {
llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
}
+
KnownBits computeKnownBits(const Value *V, unsigned Depth,
const Instruction *CxtI) const {
return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
@@ -563,20 +587,24 @@ public:
const Instruction *CxtI = nullptr) const {
return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
}
+
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
const Instruction *CxtI = nullptr) const {
return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
}
+
OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
const Value *RHS,
const Instruction *CxtI) const {
return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
}
+
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
const Value *RHS,
const Instruction *CxtI) const {
return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
}
+
OverflowResult computeOverflowForSignedAdd(const Value *LHS,
const Value *RHS,
const Instruction *CxtI) const {
@@ -626,6 +654,7 @@ private:
bool SimplifyDemandedBits(Instruction *I, unsigned Op,
const APInt &DemandedMask, KnownBits &Known,
unsigned Depth = 0);
+
/// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
/// bits. It also tries to handle simplifications that can be done based on
/// DemandedMask, but without modifying the Instruction.
@@ -633,6 +662,7 @@ private:
const APInt &DemandedMask,
KnownBits &Known,
unsigned Depth, Instruction *CxtI);
+
/// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
/// bit for "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
Value *simplifyShrShlDemandedBits(
@@ -672,6 +702,7 @@ private:
Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
Instruction *FoldPHIArgZextsIntoPHI(PHINode &PN);
+
/// If an integer typed PHI has only one use which is an IntToPtr operation,
/// replace the PHI with an existing pointer typed PHI if it exists. Otherwise
/// insert a new pointer typed PHI and replace the original one.
@@ -771,8 +802,8 @@ private:
Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};
-} // end namespace llvm.
+} // end namespace llvm
#undef DEBUG_TYPE
-#endif
+#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp?rev=316503&r1=316502&r2=316503&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp Tue Oct 24 14:24:53 2017
@@ -13,15 +13,36 @@
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
using namespace llvm;
using namespace PatternMatch;
#define DEBUG_TYPE "instcombine"
-
/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
@@ -73,7 +94,6 @@ static Value *simplifyValueKnownNonZero(
return MadeChange ? V : nullptr;
}
-
/// True if the multiply cannot be expressed in an int this size.
static bool MultiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
bool IsSigned) {
@@ -540,7 +560,6 @@ static bool isFMulOrFDivWithConstant(Val
/// This function simplifies "FMulOrDiv * C" and returns the resulting
/// expression. Note that this function could return NULL in case the
/// constants cannot be folded into a normal floating-point value.
-///
Value *InstCombiner::foldFMulConst(Instruction *FMulOrDiv, Constant *C,
Instruction *InsertBefore) {
assert(isFMulOrFDivWithConstant(FMulOrDiv) && "V is invalid");
@@ -747,7 +766,6 @@ Instruction *InstCombiner::visitFMul(Bin
// latency of the instruction Y is amortized by the expression of X*X,
// and therefore Y is in a "less critical" position compared to what it
// was before the transformation.
- //
if (AllowReassociate) {
Value *Opnd0_0, *Opnd0_1;
if (Opnd0->hasOneUse() &&
@@ -848,7 +866,6 @@ bool InstCombiner::simplifyDivRemOfSelec
return true;
}
-
/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
@@ -974,25 +991,29 @@ Instruction *InstCombiner::commonIDivTra
return nullptr;
}
+static const unsigned MaxDepth = 6;
+
namespace {
-const unsigned MaxDepth = 6;
-typedef Instruction *(*FoldUDivOperandCb)(Value *Op0, Value *Op1,
- const BinaryOperator &I,
- InstCombiner &IC);
+
+using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
+ const BinaryOperator &I,
+ InstCombiner &IC);
/// \brief Used to maintain state for visitUDivOperand().
struct UDivFoldAction {
- FoldUDivOperandCb FoldAction; ///< Informs visitUDiv() how to fold this
- ///< operand. This can be zero if this action
- ///< joins two actions together.
+ /// Informs visitUDiv() how to fold this operand. This can be zero if this
+ /// action joins two actions together.
+ FoldUDivOperandCb FoldAction;
+
+ /// Which operand to fold.
+ Value *OperandToFold;
- Value *OperandToFold; ///< Which operand to fold.
union {
- Instruction *FoldResult; ///< The instruction returned when FoldAction is
- ///< invoked.
+ /// The instruction returned when FoldAction is invoked.
+ Instruction *FoldResult;
- size_t SelectLHSIdx; ///< Stores the LHS action index if this action
- ///< joins two actions together.
+ /// Stores the LHS action index if this action joins two actions together.
+ size_t SelectLHSIdx;
};
UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
@@ -1000,7 +1021,8 @@ struct UDivFoldAction {
UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
: FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};
-}
+
+} // end anonymous namespace
// X udiv 2^C -> X >> C
static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
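
foldUDivPow2Cst is the first of these fold actions: an unsigned division by a power of two is exactly a logical right shift. The equivalence can be checked directly:

  #include <cassert>
  #include <cstdint>

  int main() {
    // For unsigned values, x / (1u << c) == x >> c; this is the
    // "X udiv 2^C -> X >> C" fold.
    for (uint32_t X = 0; X < 1024; ++X)
      for (uint32_t C = 0; C < 10; ++C)
        assert(X / (1u << C) == X >> C);
    return 0;
  }
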
@@ -1280,8 +1302,7 @@ Instruction *InstCombiner::visitSDiv(Bin
/// 1) 1/C is exact, or
/// 2) reciprocal is allowed.
/// If the conversion was successful, the simplified expression "X * 1/C" is
-/// returned; otherwise, NULL is returned.
-///
+/// returned; otherwise, nullptr is returned.
static Instruction *CvtFDivConstToReciprocal(Value *Dividend, Constant *Divisor,
bool AllowReciprocal) {
if (!isa<ConstantFP>(Divisor)) // TODO: handle vectors.
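
Per the doc comment above, CvtFDivConstToReciprocal rewrites X/C as X * (1/C) only when 1/C is exact or reciprocals are explicitly allowed. 1/C is exact precisely when C is a power of two whose reciprocal stays in range; a hedged sketch of that test, ignoring the extreme-exponent corner cases:

  #include <cmath>

  // 1/C is exactly representable iff C is a finite, nonzero power of two
  // (frexp reports a mantissa of exactly +/-0.5). Reciprocals that
  // overflow to infinity or fall into the denormal range are ignored in
  // this sketch.
  bool reciprocalIsExact(double C) {
    if (!std::isfinite(C) || C == 0.0)
      return false;
    int Exp;
    return std::fabs(std::frexp(C, &Exp)) == 0.5;
  }
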
@@ -1342,7 +1363,6 @@ Instruction *InstCombiner::visitFDiv(Bin
Res = BinaryOperator::CreateFMul(X, C);
} else if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
// (X/C1)/C2 => X /(C2*C1) [=> X * 1/(C2*C1) if reciprocal is allowed]
- //
Constant *C = ConstantExpr::getFMul(C1, C2);
if (isNormalFp(C)) {
Res = CvtFDivConstToReciprocal(X, C, AllowReciprocal);
@@ -1400,7 +1420,6 @@ Instruction *InstCombiner::visitFDiv(Bin
if (Op0->hasOneUse() && match(Op0, m_FDiv(m_Value(X), m_Value(Y)))) {
// (X/Y) / Z => X / (Y*Z)
- //
if (!isa<Constant>(Y) || !isa<Constant>(Op1)) {
NewInst = Builder.CreateFMul(Y, Op1);
if (Instruction *RI = dyn_cast<Instruction>(NewInst)) {
@@ -1412,7 +1431,6 @@ Instruction *InstCombiner::visitFDiv(Bin
}
} else if (Op1->hasOneUse() && match(Op1, m_FDiv(m_Value(X), m_Value(Y)))) {
// Z / (X/Y) => Z*Y / X
- //
if (!isa<Constant>(Y) || !isa<Constant>(Op0)) {
NewInst = Builder.CreateFMul(Op0, Y);
if (Instruction *RI = dyn_cast<Instruction>(NewInst)) {
@@ -1468,7 +1486,6 @@ Instruction *InstCombiner::commonIRemTra
if (Instruction *R = FoldOpIntoSelect(I, SI))
return R;
} else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
- using namespace llvm::PatternMatch;
const APInt *Op1Int;
if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
(I.getOpcode() == Instruction::URem ||
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp?rev=316503&r1=316502&r2=316503&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp Tue Oct 24 14:24:53 2017
@@ -12,13 +12,36 @@
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
-#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
+#include <cassert>
+#include <utility>
+
using namespace llvm;
using namespace PatternMatch;
@@ -185,7 +208,6 @@ static Value *foldSelectICmpAnd(Type *Se
/// Assuming that the specified instruction is an operand to the select, return
/// a bitmask indicating which operands of this instruction are foldable if they
/// equal the other incoming value of the select.
-///
static unsigned getSelectFoldableOperands(BinaryOperator *I) {
switch (I->getOpcode()) {
case Instruction::Add:
@@ -263,7 +285,6 @@ Instruction *InstCombiner::foldSelectOpO
if (TI->getOpcode() != Instruction::BitCast &&
(!TI->hasOneUse() || !FI->hasOneUse()))
return nullptr;
-
} else if (!TI->hasOneUse() || !FI->hasOneUse()) {
// TODO: The one-use restrictions for a scalar select could be eased if
// the fold of a select in visitLoadInst() was enhanced to match a pattern
@@ -840,7 +861,6 @@ Instruction *InstCombiner::foldSelectIns
/// Z = select X, Y, 0
///
/// because Y is not live in BB1/BB2.
-///
static bool canSelectOperandBeMappingIntoPredBlock(const Value *V,
const SelectInst &SI) {
// If the value is a non-instruction value like a constant or argument, it
@@ -1209,7 +1229,7 @@ Instruction *InstCombiner::visitSelectIn
// may have an undef operand. This is a workaround for PR31652 caused by
// a discrepancy about branch on undef between LoopUnswitch and GVN.
if (isa<UndefValue>(TrueVal) || isa<UndefValue>(FalseVal)) {
- if (any_of(SI.users(), [&](User *U) {
+ if (llvm::any_of(SI.users(), [&](User *U) {
ICmpInst *CI = dyn_cast<ICmpInst>(U);
if (CI && CI->isEquality())
return true;
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineVectorOps.cpp?rev=316503&r1=316502&r2=316503&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineVectorOps.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineVectorOps.cpp Tue Oct 24 14:24:53 2017
@@ -13,10 +13,33 @@
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/VectorUtils.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <utility>
+
using namespace llvm;
using namespace PatternMatch;
@@ -90,7 +113,7 @@ Instruction *InstCombiner::scalarizePHI(
// Verify that this PHI user has one use, which is the PHI itself,
// and that it is a binary operation which is cheap to scalarize.
- // otherwise return NULL.
+ // otherwise return nullptr.
if (!PHIUser->hasOneUse() || !(PHIUser->user_back() == PN) ||
!(isa<BinaryOperator>(PHIUser)) || !cheapToScalarize(PHIUser, true))
return nullptr;
@@ -421,7 +444,7 @@ static void replaceExtractElements(Inser
///
/// Note: we intentionally don't try to fold earlier shuffles since they have
/// often been chosen carefully to be efficiently implementable on the target.
-typedef std::pair<Value *, Value *> ShuffleOps;
+using ShuffleOps = std::pair<Value *, Value *>;
static ShuffleOps collectShuffleElements(Value *V,
SmallVectorImpl<Constant *> &Mask,
@@ -1421,7 +1444,7 @@ Instruction *InstCombiner::visitShuffleV
eltMask = Mask[i]-LHSWidth;
// If LHS's width is changed, shift the mask value accordingly.
- // If newRHS == NULL, i.e. LHSOp0 == RHSOp0, we want to remap any
+ // If newRHS == nullptr, i.e. LHSOp0 == RHSOp0, we want to remap any
// references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
// If newRHS == newLHS, we want to remap any references from newRHS to
// newLHS so that we can properly identify splats that may occur due to
Modified: llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp?rev=316503&r1=316502&r2=316503&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp Tue Oct 24 14:24:53 2017
@@ -34,10 +34,14 @@
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
-#include "llvm-c/Initialization.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
@@ -49,26 +53,55 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
-#include <climits>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <utility>
+
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -396,7 +429,7 @@ bool InstCombiner::SimplifyAssociativeOr
// No further simplifications.
return Changed;
- } while (1);
+ } while (true);
}
/// Return whether "X LOp (Y ROp Z)" is always equal to
@@ -1174,7 +1207,7 @@ Value *InstCombiner::Descale(Value *Val,
// Parent - initially null, but after drilling down notes where Op came from.
// In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
// 0'th operand of Val.
- std::pair<Instruction*, unsigned> Parent;
+ std::pair<Instruction *, unsigned> Parent;
// Set if the transform requires a descaling at deeper levels that doesn't
// overflow.
@@ -1184,7 +1217,6 @@ Value *InstCombiner::Descale(Value *Val,
int32_t logScale = Scale.exactLogBase2();
for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
-
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// If Op is a constant divisible by Scale then descale to the quotient.
APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
@@ -1199,7 +1231,6 @@ Value *InstCombiner::Descale(Value *Val,
}
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
-
if (BO->getOpcode() == Instruction::Mul) {
// Multiplication.
NoSignedWrap = BO->hasNoSignedWrap();
@@ -1374,7 +1405,7 @@ Value *InstCombiner::Descale(Value *Val,
// Move up one level in the expression.
assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
Ancestor = Ancestor->user_back();
- } while (1);
+ } while (true);
}
/// \brief Creates node of binary operation with the same attributes as the
@@ -1621,7 +1652,6 @@ Instruction *InstCombiner::visitGetEleme
// Combine Indices - If the source pointer to this getelementptr instruction
// is a getelementptr instruction, combine the indices of the two
// getelementptr instructions into a single instruction.
- //
if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
return nullptr;
@@ -1646,7 +1676,6 @@ Instruction *InstCombiner::visitGetEleme
if (EndsWithSequential) {
// Replace: gep (gep %P, long B), long A, ...
// With: T = long A+B; gep %P, T, ...
- //
Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
Value *GO1 = GEP.getOperand(1);
@@ -2226,7 +2255,6 @@ tryToMoveFreeBeforeNullTest(CallInst &FI
return &FI;
}
-
Instruction *InstCombiner::visitFree(CallInst &FI) {
Value *Op = FI.getArgOperand(0);
@@ -3060,7 +3088,6 @@ bool InstCombiner::run() {
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant). Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
-///
static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
SmallPtrSetImpl<BasicBlock *> &Visited,
InstCombineWorklist &ICWorklist,
@@ -3209,8 +3236,6 @@ static bool combineInstructionsOverFunct
F.getContext(), TargetFolder(DL),
IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
Worklist.Add(I);
-
- using namespace llvm::PatternMatch;
if (match(I, m_Intrinsic<Intrinsic::assume>()))
AC.registerAssumption(cast<CallInst>(I));
}));
@@ -3223,7 +3248,7 @@ static bool combineInstructionsOverFunct
// Iterate while there is work to do.
int Iteration = 0;
- for (;;) {
+ while (true) {
++Iteration;
DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
<< F.getName() << "\n");
@@ -3297,6 +3322,7 @@ bool InstructionCombiningPass::runOnFunc
}
char InstructionCombiningPass::ID = 0;
+
INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
"Combine redundant instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
Modified: llvm/trunk/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp?rev=316503&r1=316502&r2=316503&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp Tue Oct 24 14:24:53 2017
@@ -1,4 +1,4 @@
-//===-- InductiveRangeCheckElimination.cpp - ------------------------------===//
+//===- InductiveRangeCheckElimination.cpp - -------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,6 +6,7 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
// The InductiveRangeCheckElimination pass splits a loop's iteration space into
// three disjoint ranges. It does that in a way such that the middle loop
// provably does not need range checks. As an example, it will
@@ -39,30 +40,61 @@
// throw_out_of_bounds();
// }
// }
+//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <limits>
+#include <utility>
+#include <vector>
using namespace llvm;
+using namespace llvm::PatternMatch;
static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
cl::init(64));
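
The header comment describes the pass's three-way split: a main loop whose range checks are provably redundant, plus pre/post loops for the leftover iterations. Schematically, reusing the comment's own names (len, n, throw_out_of_bounds) in plain C++ as a sketch of the shape of the transform, not the pass's actual output:

  #include <algorithm>

  void throw_out_of_bounds();

  // Original form: a range check on every iteration.
  void before(int *a, int len, int n) {
    for (int i = 0; i < n; ++i) {
      if (0 <= i && i < len)
        a[i] = 0;
      else
        throw_out_of_bounds();
    }
  }

  // Shape of the IRCE result: the main loop runs the subrange where the
  // check provably passes; a post loop handles the rest. (A pre loop is
  // also emitted when the safe subrange does not start at iteration 0.)
  void after(int *a, int len, int n) {
    int m = std::min(n, std::max(len, 0)); // safe subrange is [0, m)
    for (int i = 0; i < m; ++i)
      a[i] = 0; // main loop: no range checks
    for (int i = m; i < n; ++i) {
      if (0 <= i && i < len)
        a[i] = 0;
      else
        throw_out_of_bounds();
    }
  }
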
@@ -203,6 +235,7 @@ public:
class InductiveRangeCheckElimination : public LoopPass {
public:
static char ID;
+
InductiveRangeCheckElimination() : LoopPass(ID) {
initializeInductiveRangeCheckEliminationPass(
*PassRegistry::getPassRegistry());
@@ -216,8 +249,9 @@ public:
bool runOnLoop(Loop *L, LPPassManager &LPM) override;
};
+} // end anonymous namespace
+
char InductiveRangeCheckElimination::ID = 0;
-}
INITIALIZE_PASS_BEGIN(InductiveRangeCheckElimination, "irce",
"Inductive range check elimination", false, false)
@@ -251,12 +285,10 @@ StringRef InductiveRangeCheck::rangeChec
/// range checked, and set `Length` to the upper limit `Index` is being range
/// checked with if (and only if) the range check type is stronger or equal to
/// RANGE_CHECK_UPPER.
-///
InductiveRangeCheck::RangeCheckKind
InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
ScalarEvolution &SE, Value *&Index,
Value *&Length) {
-
auto IsNonNegativeAndNotLoopVarying = [&SE, L](Value *V) {
const SCEV *S = SE.getSCEV(V);
if (isa<SCEVCouldNotCompute>(S))
@@ -266,8 +298,6 @@ InductiveRangeCheck::parseRangeCheckICmp
SE.isKnownNonNegative(S);
};
- using namespace llvm::PatternMatch;
-
ICmpInst::Predicate Pred = ICI->getPredicate();
Value *LHS = ICI->getOperand(0);
Value *RHS = ICI->getOperand(1);
@@ -321,8 +351,6 @@ void InductiveRangeCheck::extractRangeCh
Loop *L, ScalarEvolution &SE, Use &ConditionUse,
SmallVectorImpl<InductiveRangeCheck> &Checks,
SmallPtrSetImpl<Value *> &Visited) {
- using namespace llvm::PatternMatch;
-
Value *Condition = ConditionUse.get();
if (!Visited.insert(Condition).second)
return;
@@ -341,7 +369,6 @@ void InductiveRangeCheck::extractRangeCh
const auto &RChkB = SubChecks[1];
if ((RChkA.Length == RChkB.Length || !RChkA.Length || !RChkB.Length) &&
RChkA.Offset == RChkB.Offset && RChkA.Scale == RChkB.Scale) {
-
// If RChkA.Kind == RChkB.Kind then we just found two identical checks.
// But if one of them is a RANGE_CHECK_LOWER and the other is a
// RANGE_CHECK_UPPER (only possibility if they're different) then
@@ -388,7 +415,6 @@ void InductiveRangeCheck::extractRangeCh
void InductiveRangeCheck::extractRangeChecksFromBranch(
BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo &BPI,
SmallVectorImpl<InductiveRangeCheck> &Checks) {
-
if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
return;
@@ -439,16 +465,16 @@ namespace {
// kinds of loops we can deal with -- ones that have a single latch that is also
// an exiting block *and* have a canonical induction variable.
struct LoopStructure {
- const char *Tag;
+ const char *Tag = "";
- BasicBlock *Header;
- BasicBlock *Latch;
+ BasicBlock *Header = nullptr;
+ BasicBlock *Latch = nullptr;
// `Latch's terminator instruction is `LatchBr', and its `LatchBrExitIdx'th
// successor is `LatchExit', the exit block of the loop.
- BranchInst *LatchBr;
- BasicBlock *LatchExit;
- unsigned LatchBrExitIdx;
+ BranchInst *LatchBr = nullptr;
+ BasicBlock *LatchExit = nullptr;
+ unsigned LatchBrExitIdx = std::numeric_limits<unsigned>::max();
// The loop represented by this instance of LoopStructure is semantically
// equivalent to:
@@ -459,18 +485,14 @@ struct LoopStructure {
// for (intN_ty iv = IndVarStart; predicate(iv, LoopExitAt); iv = IndVarBase)
// ... body ...
- Value *IndVarBase;
- Value *IndVarStart;
- Value *IndVarStep;
- Value *LoopExitAt;
- bool IndVarIncreasing;
- bool IsSignedPredicate;
-
- LoopStructure()
- : Tag(""), Header(nullptr), Latch(nullptr), LatchBr(nullptr),
- LatchExit(nullptr), LatchBrExitIdx(-1), IndVarBase(nullptr),
- IndVarStart(nullptr), IndVarStep(nullptr), LoopExitAt(nullptr),
- IndVarIncreasing(false), IsSignedPredicate(true) {}
+ Value *IndVarBase = nullptr;
+ Value *IndVarStart = nullptr;
+ Value *IndVarStep = nullptr;
+ Value *LoopExitAt = nullptr;
+ bool IndVarIncreasing = false;
+ bool IsSignedPredicate = true;
+
+ LoopStructure() = default;
template <typename M> LoopStructure map(M Map) const {
LoopStructure Result;
@@ -503,7 +525,6 @@ struct LoopStructure {
/// loops to run any remaining iterations. The pre loop runs any iterations in
/// which the induction variable is < Begin, and the post loop runs any
/// iterations in which the induction variable is >= End.
-///
class LoopConstrainer {
// The representation of a clone of the original loop we started out with.
struct ClonedLoop {
@@ -520,13 +541,12 @@ class LoopConstrainer {
// Result of rewriting the range of a loop. See changeIterationSpaceEnd for
// more details on what these fields mean.
struct RewrittenRangeInfo {
- BasicBlock *PseudoExit;
- BasicBlock *ExitSelector;
+ BasicBlock *PseudoExit = nullptr;
+ BasicBlock *ExitSelector = nullptr;
std::vector<PHINode *> PHIValuesAtPseudoExit;
- PHINode *IndVarEnd;
+ PHINode *IndVarEnd = nullptr;
- RewrittenRangeInfo()
- : PseudoExit(nullptr), ExitSelector(nullptr), IndVarEnd(nullptr) {}
+ RewrittenRangeInfo() = default;
};
// Calculated subranges we restrict the iteration space of the main loop to.
@@ -550,14 +570,12 @@ class LoopConstrainer {
// Compute a safe set of limits for the main loop to run in -- effectively the
// intersection of `Range' and the iteration space of the original loop.
// Return None if unable to compute the set of subranges.
- //
Optional<SubRanges> calculateSubRanges(bool IsSignedPredicate) const;
// Clone `OriginalLoop' and return the result in CLResult. The IR after
// running `cloneLoop' is well formed except for the PHI nodes in CLResult --
// the PHI nodes say that there is an incoming edge from `OriginalPreheader`
// but there is no such edge.
- //
void cloneLoop(ClonedLoop &CLResult, const char *Tag) const;
// Create the appropriate loop structure needed to describe a cloned copy of
@@ -586,7 +604,6 @@ class LoopConstrainer {
// After changeIterationSpaceEnd, `Preheader' is no longer a legitimate
// preheader because it is made to branch to the loop header only
// conditionally.
- //
RewrittenRangeInfo
changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader,
Value *ExitLoopAt,
@@ -594,7 +611,6 @@ class LoopConstrainer {
// The loop denoted by `LS' has `OldPreheader' as its preheader. This
// function creates a new preheader for `LS' and returns it.
- //
BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader,
const char *Tag) const;
@@ -622,12 +638,13 @@ class LoopConstrainer {
// Information about the original loop we started out with.
Loop &OriginalLoop;
- const SCEV *LatchTakenCount;
- BasicBlock *OriginalPreheader;
+
+ const SCEV *LatchTakenCount = nullptr;
+ BasicBlock *OriginalPreheader = nullptr;
// The preheader of the main loop. This may or may not be different from
// `OriginalPreheader'.
- BasicBlock *MainLoopPreheader;
+ BasicBlock *MainLoopPreheader = nullptr;
// The range we need to run the main loop in.
InductiveRangeCheck::Range Range;
@@ -641,15 +658,14 @@ public:
const LoopStructure &LS, ScalarEvolution &SE,
DominatorTree &DT, InductiveRangeCheck::Range R)
: F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()),
- SE(SE), DT(DT), LPM(LPM), LI(LI), OriginalLoop(L),
- LatchTakenCount(nullptr), OriginalPreheader(nullptr),
- MainLoopPreheader(nullptr), Range(R), MainLoopStructure(LS) {}
+ SE(SE), DT(DT), LPM(LPM), LI(LI), OriginalLoop(L), Range(R),
+ MainLoopStructure(LS) {}
// Entry point for the algorithm. Returns true on success.
bool run();
};
-}
+} // end anonymous namespace
void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
BasicBlock *ReplaceBy) {
@@ -1099,7 +1115,6 @@ LoopConstrainer::calculateSubRanges(bool
// that case, `Clamp` will always return `Smallest` and
// [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`)
// will be an empty range. Returning an empty range is always safe.
- //
Smallest = SE.getAddExpr(End, One);
Greatest = SE.getAddExpr(Start, One);
@@ -1189,7 +1204,6 @@ void LoopConstrainer::cloneLoop(LoopCons
LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd(
const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt,
BasicBlock *ContinuationBlock) const {
-
// We start with a loop with a single latch:
//
// +--------------------+
@@ -1260,7 +1274,6 @@ LoopConstrainer::RewrittenRangeInfo Loop
// | original exit <----+
// | |
// +--------------------+
- //
RewrittenRangeInfo RRI;
@@ -1363,7 +1376,6 @@ LoopConstrainer::RewrittenRangeInfo Loop
void LoopConstrainer::rewriteIncomingValuesForPHIs(
LoopStructure &LS, BasicBlock *ContinuationBlock,
const LoopConstrainer::RewrittenRangeInfo &RRI) const {
-
unsigned PHIIndex = 0;
for (Instruction &I : *LS.Header) {
auto *PN = dyn_cast<PHINode>(&I);
@@ -1381,7 +1393,6 @@ void LoopConstrainer::rewriteIncomingVal
BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS,
BasicBlock *OldPreheader,
const char *Tag) const {
-
BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header);
BranchInst::Create(LS.Header, Preheader);