[llvm] 5a8819b - [InstCombine] Use replaceOperand() in more places
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 11 08:38:38 PST 2020
Author: Nikita Popov
Date: 2020-02-11T17:38:23+01:00
New Revision: 5a8819b216e322c4a1e9cdc47094d1b773309b9d
URL: https://github.com/llvm/llvm-project/commit/5a8819b216e322c4a1e9cdc47094d1b773309b9d
DIFF: https://github.com/llvm/llvm-project/commit/5a8819b216e322c4a1e9cdc47094d1b773309b9d.diff
LOG: [InstCombine] Use replaceOperand() in more places
This is a follow-up to D73803; it uses the replaceOperand()
helper in more places.
This should be NFC apart from changes to worklist order.
Differential Revision: https://reviews.llvm.org/D73919
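
For context, replaceOperand() was introduced in D73803; it folds the old
"setOperand(N, V); return &I;" idiom into a single call and also feeds the
displaced operand back into the InstCombine worklist, which is why the only
expected behavioral difference is worklist order. Below is a minimal,
self-contained sketch of that pattern; the name replaceOperandSketch and the
AddToWorklist callback are illustrative stand-ins, not the upstream
definition in InstCombineInternal.h.

#include <functional>
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Sketch of the replaceOperand() pattern: swap in the new operand, hand the
// displaced value to the caller's worklist hook (it may now be dead or
// further simplifiable), and return the instruction so callers can write
// "return replaceOperandSketch(I, N, V, ...);" to signal that I changed.
static Instruction *
replaceOperandSketch(Instruction &I, unsigned OpNum, Value *V,
                     const std::function<void(Value *)> &AddToWorklist) {
  Value *Old = I.getOperand(OpNum); // operand being displaced
  I.setOperand(OpNum, V);           // same effect as the old setOperand() calls
  AddToWorklist(Old);               // re-queue the old operand for revisiting
  return &I;                        // non-null return tells the driver "modified"
}

Returning the instruction rather than nullptr is what lets the call sites in
the hunks below collapse two statements into a single return.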
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 574662800941..d1b820f93660 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1380,8 +1380,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// (add (and A, B) (or A, B)) --> (add A, B)
if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
m_c_And(m_Deferred(A), m_Deferred(B))))) {
- I.setOperand(0, A);
- I.setOperand(1, B);
+ // Replacing operands in-place to preserve nuw/nsw flags.
+ replaceOperand(I, 0, A);
+ replaceOperand(I, 1, B);
return &I;
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
index 825f4b468b0a..9d3ecba559c3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
@@ -138,13 +138,11 @@ Instruction *InstCombiner::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
if (RMWI.getType()->isIntegerTy() &&
RMWI.getOperation() != AtomicRMWInst::Or) {
RMWI.setOperation(AtomicRMWInst::Or);
- RMWI.setOperand(1, ConstantInt::get(RMWI.getType(), 0));
- return &RMWI;
+ return replaceOperand(RMWI, 1, ConstantInt::get(RMWI.getType(), 0));
} else if (RMWI.getType()->isFloatingPointTy() &&
RMWI.getOperation() != AtomicRMWInst::FAdd) {
RMWI.setOperation(AtomicRMWInst::FAdd);
- RMWI.setOperand(1, ConstantFP::getNegativeZero(RMWI.getType()));
- return &RMWI;
+ return replaceOperand(RMWI, 1, ConstantFP::getNegativeZero(RMWI.getType()));
}
// Check if the required ordering is compatible with an atomic load.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 0f8767873cf1..37e04e68ec5f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -6030,14 +6030,11 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
// If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
// then canonicalize the operand to 0.0.
if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
- if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI)) {
- I.setOperand(0, ConstantFP::getNullValue(OpType));
- return &I;
- }
- if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI)) {
- I.setOperand(1, ConstantFP::getNullValue(OpType));
- return &I;
- }
+ if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI))
+ return replaceOperand(I, 0, ConstantFP::getNullValue(OpType));
+
+ if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI))
+ return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
}
// fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
@@ -6062,10 +6059,8 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
// The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
// fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
- if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP())) {
- I.setOperand(1, ConstantFP::getNullValue(OpType));
- return &I;
- }
+ if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP()))
+ return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
// Handle fcmp with instruction LHS and constant RHS.
Instruction *LHSI;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index c086b152c09b..c9baa8b87faf 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -56,7 +56,8 @@ static Value *createMinMax(InstCombiner::BuilderTy &Builder,
/// Replace a select operand based on an equality comparison with the identity
/// constant of a binop.
static Instruction *foldSelectBinOpIdentity(SelectInst &Sel,
- const TargetLibraryInfo &TLI) {
+ const TargetLibraryInfo &TLI,
+ InstCombiner &IC) {
// The select condition must be an equality compare with a constant operand.
Value *X;
Constant *C;
@@ -107,8 +108,7 @@ static Instruction *foldSelectBinOpIdentity(SelectInst &Sel,
// S = { select (cmp eq X, C), BO, ? } or { select (cmp ne X, C), ?, BO }
// =>
// S = { select (cmp eq X, C), Y, ? } or { select (cmp ne X, C), ?, Y }
- Sel.setOperand(IsEq ? 1 : 2, Y);
- return &Sel;
+ return IC.replaceOperand(Sel, IsEq ? 1 : 2, Y);
}
/// This folds:
@@ -997,7 +997,7 @@ static bool adjustMinMax(SelectInst &Sel, ICmpInst &Cmp) {
/// constant operand of the select.
static Instruction *
canonicalizeMinMaxWithConstant(SelectInst &Sel, ICmpInst &Cmp,
- InstCombiner::BuilderTy &Builder) {
+ InstCombiner &IC) {
if (!Cmp.hasOneUse() || !isa<Constant>(Cmp.getOperand(1)))
return nullptr;
@@ -1020,7 +1020,7 @@ canonicalizeMinMaxWithConstant(SelectInst &Sel, ICmpInst &Cmp,
return nullptr;
// Create the canonical compare and plug it into the select.
- Sel.setCondition(Builder.CreateICmp(CanonicalPred, LHS, RHS));
+ IC.replaceOperand(Sel, 0, IC.Builder.CreateICmp(CanonicalPred, LHS, RHS));
// If the select operands did not change, we're done.
if (Sel.getTrueValue() == LHS && Sel.getFalseValue() == RHS)
@@ -1329,7 +1329,7 @@ static Instruction *canonicalizeClampLike(SelectInst &Sel0, ICmpInst &Cmp0,
// and swap the hands of select.
static Instruction *
tryToReuseConstantFromSelectInComparison(SelectInst &Sel, ICmpInst &Cmp,
- InstCombiner::BuilderTy &Builder) {
+ InstCombiner &IC) {
ICmpInst::Predicate Pred;
Value *X;
Constant *C0;
@@ -1381,13 +1381,13 @@ tryToReuseConstantFromSelectInComparison(SelectInst &Sel, ICmpInst &Cmp,
return nullptr;
// It matched! Lets insert the new comparison just before select.
- InstCombiner::BuilderTy::InsertPointGuard Guard(Builder);
- Builder.SetInsertPoint(&Sel);
+ InstCombiner::BuilderTy::InsertPointGuard Guard(IC.Builder);
+ IC.Builder.SetInsertPoint(&Sel);
Pred = ICmpInst::getSwappedPredicate(Pred); // Yes, swapped.
- Value *NewCmp = Builder.CreateICmp(Pred, X, FlippedStrictness->second,
- Cmp.getName() + ".inv");
- Sel.setCondition(NewCmp);
+ Value *NewCmp = IC.Builder.CreateICmp(Pred, X, FlippedStrictness->second,
+ Cmp.getName() + ".inv");
+ IC.replaceOperand(Sel, 0, NewCmp);
Sel.swapValues();
Sel.swapProfMetadata();
@@ -1400,7 +1400,7 @@ Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
if (Value *V = foldSelectValueEquivalence(SI, *ICI, SQ))
return replaceInstUsesWith(SI, V);
- if (Instruction *NewSel = canonicalizeMinMaxWithConstant(SI, *ICI, Builder))
+ if (Instruction *NewSel = canonicalizeMinMaxWithConstant(SI, *ICI, *this))
return NewSel;
if (Instruction *NewAbs = canonicalizeAbsNabs(SI, *ICI, Builder))
@@ -1410,7 +1410,7 @@ Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
return NewAbs;
if (Instruction *NewSel =
- tryToReuseConstantFromSelectInComparison(SI, *ICI, Builder))
+ tryToReuseConstantFromSelectInComparison(SI, *ICI, *this))
return NewSel;
bool Changed = adjustMinMax(SI, *ICI);
@@ -1973,7 +1973,7 @@ static Instruction *canonicalizeSelectToShuffle(SelectInst &SI) {
/// other operations in IR and having all operands of a select be vector types
/// is likely better for vector codegen.
static Instruction *canonicalizeScalarSelectOfVecs(
- SelectInst &Sel, InstCombiner::BuilderTy &Builder) {
+ SelectInst &Sel, InstCombiner &IC) {
Type *Ty = Sel.getType();
if (!Ty->isVectorTy())
return nullptr;
@@ -1987,9 +1987,7 @@ static Instruction *canonicalizeScalarSelectOfVecs(
// Splatting the extracted condition reduces code (we could directly create a
// splat shuffle of the source vector to eliminate the intermediate step).
unsigned NumElts = Ty->getVectorNumElements();
- Value *SplatCond = Builder.CreateVectorSplat(NumElts, Cond);
- Sel.setCondition(SplatCond);
- return &Sel;
+ return IC.replaceOperand(Sel, 0, IC.Builder.CreateVectorSplat(NumElts, Cond));
}
/// Reuse bitcasted operands between a compare and select:
@@ -2395,7 +2393,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (Instruction *I = canonicalizeSelectToShuffle(SI))
return I;
- if (Instruction *I = canonicalizeScalarSelectOfVecs(SI, Builder))
+ if (Instruction *I = canonicalizeScalarSelectOfVecs(SI, *this))
return I;
// Canonicalize a one-use integer compare with a non-canonical predicate by
@@ -2698,8 +2696,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (TrueSI->getCondition() == CondVal) {
if (SI.getTrueValue() == TrueSI->getTrueValue())
return nullptr;
- SI.setOperand(1, TrueSI->getTrueValue());
- return &SI;
+ return replaceOperand(SI, 1, TrueSI->getTrueValue());
}
// select(C0, select(C1, a, b), b) -> select(C0&C1, a, b)
// We choose this as normal form to enable folding on the And and shortening
@@ -2718,8 +2715,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (FalseSI->getCondition() == CondVal) {
if (SI.getFalseValue() == FalseSI->getFalseValue())
return nullptr;
- SI.setOperand(2, FalseSI->getFalseValue());
- return &SI;
+ return replaceOperand(SI, 2, FalseSI->getFalseValue());
}
// select(C0, a, select(C1, a, b)) -> select(C0|C1, a, b)
if (FalseSI->getTrueValue() == TrueVal && FalseSI->hasOneUse()) {
@@ -2788,7 +2784,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *NotCond;
if (match(CondVal, m_Not(m_Value(NotCond)))) {
- SI.setOperand(0, NotCond);
+ replaceOperand(SI, 0, NotCond);
SI.swapValues();
SI.swapProfMetadata();
return &SI;
@@ -2826,7 +2822,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (Instruction *Select = foldSelectCmpXchg(SI))
return Select;
- if (Instruction *Select = foldSelectBinOpIdentity(SI, TLI))
+ if (Instruction *Select = foldSelectBinOpIdentity(SI, TLI, *this))
return Select;
if (Instruction *Rot = foldSelectRotate(SI))
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 43be42a40969..49d6443d2277 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -388,8 +388,7 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
// demand the sign bit (and many others) here??
Value *Rem = Builder.CreateAnd(A, ConstantInt::get(I.getType(), *B - 1),
Op1->getName());
- I.setOperand(1, Rem);
- return &I;
+ return replaceOperand(I, 1, Rem);
}
if (Instruction *Logic = foldShiftOfShiftedLogic(I, Builder))
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 4277774cff23..a5f41e43f9eb 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -1739,7 +1739,8 @@ static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) {
/// Try to replace a shuffle with an insertelement or try to replace a shuffle
/// operand with the operand of an insertelement.
-static Instruction *foldShuffleWithInsert(ShuffleVectorInst &Shuf) {
+static Instruction *foldShuffleWithInsert(ShuffleVectorInst &Shuf,
+ InstCombiner &IC) {
Value *V0 = Shuf.getOperand(0), *V1 = Shuf.getOperand(1);
SmallVector<int, 16> Mask = Shuf.getShuffleMask();
@@ -1759,20 +1760,16 @@ static Instruction *foldShuffleWithInsert(ShuffleVectorInst &Shuf) {
uint64_t IdxC;
if (match(V0, m_InsertElement(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
// shuf (inselt X, ?, IdxC), ?, Mask --> shuf X, ?, Mask
- if (none_of(Mask, [IdxC](int MaskElt) { return MaskElt == (int)IdxC; })) {
- Shuf.setOperand(0, X);
- return &Shuf;
- }
+ if (none_of(Mask, [IdxC](int MaskElt) { return MaskElt == (int)IdxC; }))
+ return IC.replaceOperand(Shuf, 0, X);
}
if (match(V1, m_InsertElement(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
// Offset the index constant by the vector width because we are checking for
// accesses to the 2nd vector input of the shuffle.
IdxC += NumElts;
// shuf ?, (inselt X, ?, IdxC), Mask --> shuf ?, X, Mask
- if (none_of(Mask, [IdxC](int MaskElt) { return MaskElt == (int)IdxC; })) {
- Shuf.setOperand(1, X);
- return &Shuf;
- }
+ if (none_of(Mask, [IdxC](int MaskElt) { return MaskElt == (int)IdxC; }))
+ return IC.replaceOperand(Shuf, 1, X);
}
// shuffle (insert ?, Scalar, IndexC), V1, Mask --> insert V1, Scalar, IndexC'
@@ -1949,7 +1946,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// These transforms have the potential to lose undef knowledge, so they are
// intentionally placed after SimplifyDemandedVectorElts().
- if (Instruction *I = foldShuffleWithInsert(SVI))
+ if (Instruction *I = foldShuffleWithInsert(SVI, *this))
return I;
if (Instruction *I = foldIdentityPaddedShuffles(SVI))
return I;