[llvm] Make CombinerHelper methods const (PR #119529)
Paul Bowen-Huggett via llvm-commits
llvm-commits at lists.llvm.org
Thu Dec 19 07:17:08 PST 2024
https://github.com/paulhuggett updated https://github.com/llvm/llvm-project/pull/119529
>From c7e9adfd982aa2985efda13c02f24ed1a34975ff Mon Sep 17 00:00:00 2001
From: Paul Bowen-Huggett <paulhuggett at mac.com>
Date: Wed, 11 Dec 2024 10:18:03 +0100
Subject: [PATCH] Make CombinerHelper methods const
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
There are a number of backends (specifically AArch64, AMDGPU, Mips,
and RISCV) which contain a “TODO: make CombinerHelper methods const”
comment. This PR does just that and makes all of the CombinerHelper
methods const, removes the TODO comment and makes the associated
instance const. This change makes some sense because CombinerHelper
simply modifies other objects to which it holds pointers or
references.
Note that AMDGPU contains an identical comment for an instance of
AMDGPUCombinerHelper (a subclass of CombinerHelper). I deliberately
haven’t modified the methods of that class in order to limit the scope
of the change. I’m happy to do so either now or as a follow-up.
---
.../llvm/CodeGen/GlobalISel/CombinerHelper.h | 571 ++++++++++--------
.../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 541 +++++++++--------
.../GlobalISel/CombinerHelperArtifacts.cpp | 4 +-
.../GlobalISel/CombinerHelperCasts.cpp | 18 +-
.../GlobalISel/CombinerHelperCompares.cpp | 8 +-
.../GlobalISel/CombinerHelperVectorOps.cpp | 22 +-
.../GISel/AArch64O0PreLegalizerCombiner.cpp | 3 +-
.../GISel/AArch64PostLegalizerCombiner.cpp | 3 +-
.../GISel/AArch64PostLegalizerLowering.cpp | 3 +-
.../GISel/AArch64PreLegalizerCombiner.cpp | 6 +-
.../Target/AMDGPU/AMDGPURegBankCombiner.cpp | 3 +-
.../Target/Mips/MipsPostLegalizerCombiner.cpp | 3 +-
.../Target/Mips/MipsPreLegalizerCombiner.cpp | 3 +-
.../GISel/RISCVO0PreLegalizerCombiner.cpp | 3 +-
.../GISel/RISCVPostLegalizerCombiner.cpp | 3 +-
.../RISCV/GISel/RISCVPreLegalizerCombiner.cpp | 3 +-
16 files changed, 643 insertions(+), 554 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 55c3b72c8e027f..871456d2a55b5e 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -172,17 +172,18 @@ class CombinerHelper {
/// Set the register bank of \p Reg.
/// Does nothing if the RegBank is null.
/// This is the counterpart to getRegBank.
- void setRegBank(Register Reg, const RegisterBank *RegBank);
+ void setRegBank(Register Reg, const RegisterBank *RegBank) const;
/// If \p MI is COPY, try to combine it.
/// Returns true if MI changed.
- bool tryCombineCopy(MachineInstr &MI);
- bool matchCombineCopy(MachineInstr &MI);
- void applyCombineCopy(MachineInstr &MI);
+ bool tryCombineCopy(MachineInstr &MI) const;
+ bool matchCombineCopy(MachineInstr &MI) const;
+ void applyCombineCopy(MachineInstr &MI) const;
/// Returns true if \p DefMI precedes \p UseMI or they are the same
/// instruction. Both must be in the same basic block.
- bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI);
+ bool isPredecessor(const MachineInstr &DefMI,
+ const MachineInstr &UseMI) const;
/// Returns true if \p DefMI dominates \p UseMI. By definition an
/// instruction dominates itself.
@@ -190,40 +191,50 @@ class CombinerHelper {
/// If we haven't been provided with a MachineDominatorTree during
/// construction, this function returns a conservative result that tracks just
/// a single basic block.
- bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);
+ bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI) const;
/// If \p MI is extend that consumes the result of a load, try to combine it.
/// Returns true if MI changed.
- bool tryCombineExtendingLoads(MachineInstr &MI);
- bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
- void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
+ bool tryCombineExtendingLoads(MachineInstr &MI) const;
+ bool matchCombineExtendingLoads(MachineInstr &MI,
+ PreferredTuple &MatchInfo) const;
+ void applyCombineExtendingLoads(MachineInstr &MI,
+ PreferredTuple &MatchInfo) const;
/// Match (and (load x), mask) -> zextload x
- bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchCombineLoadWithAndMask(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
/// Combine a G_EXTRACT_VECTOR_ELT of a load into a narrowed
/// load.
- bool matchCombineExtractedVectorLoad(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchCombineExtractedVectorLoad(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
- bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
- void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
+ bool matchCombineIndexedLoadStore(MachineInstr &MI,
+ IndexedLoadStoreMatchInfo &MatchInfo) const;
+ void applyCombineIndexedLoadStore(MachineInstr &MI,
+ IndexedLoadStoreMatchInfo &MatchInfo) const;
- bool matchSextTruncSextLoad(MachineInstr &MI);
- void applySextTruncSextLoad(MachineInstr &MI);
+ bool matchSextTruncSextLoad(MachineInstr &MI) const;
+ void applySextTruncSextLoad(MachineInstr &MI) const;
/// Match sext_inreg(load p), imm -> sextload p
- bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
- void applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+ bool matchSextInRegOfLoad(MachineInstr &MI,
+ std::tuple<Register, unsigned> &MatchInfo) const;
+ void applySextInRegOfLoad(MachineInstr &MI,
+ std::tuple<Register, unsigned> &MatchInfo) const;
/// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
/// when their source operands are identical.
- bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI);
- void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI);
+ bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;
+ void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI) const;
/// If a brcond's true block is not the fallthrough, make it so by inverting
/// the condition and swapping operands.
- bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond);
- void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond);
+ bool matchOptBrCondByInvertingCond(MachineInstr &MI,
+ MachineInstr *&BrCond) const;
+ void applyOptBrCondByInvertingCond(MachineInstr &MI,
+ MachineInstr *&BrCond) const;
/// If \p MI is G_CONCAT_VECTORS, try to combine it.
/// Returns true if MI changed.
@@ -239,21 +250,25 @@ class CombinerHelper {
/// needed to produce the flattened build_vector.
///
/// \pre MI.getOpcode() == G_CONCAT_VECTORS.
- bool matchCombineConcatVectors(MachineInstr &MI, SmallVector<Register> &Ops);
+ bool matchCombineConcatVectors(MachineInstr &MI,
+ SmallVector<Register> &Ops) const;
/// Replace \p MI with a flattened build_vector with \p Ops
/// or an implicit_def if \p Ops is empty.
- void applyCombineConcatVectors(MachineInstr &MI, SmallVector<Register> &Ops);
+ void applyCombineConcatVectors(MachineInstr &MI,
+ SmallVector<Register> &Ops) const;
- bool matchCombineShuffleConcat(MachineInstr &MI, SmallVector<Register> &Ops);
+ bool matchCombineShuffleConcat(MachineInstr &MI,
+ SmallVector<Register> &Ops) const;
/// Replace \p MI with a flattened build_vector with \p Ops
/// or an implicit_def if \p Ops is empty.
- void applyCombineShuffleConcat(MachineInstr &MI, SmallVector<Register> &Ops);
+ void applyCombineShuffleConcat(MachineInstr &MI,
+ SmallVector<Register> &Ops) const;
/// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
/// Returns true if MI changed.
///
/// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
- bool tryCombineShuffleVector(MachineInstr &MI);
+ bool tryCombineShuffleVector(MachineInstr &MI) const;
/// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
/// concat_vectors.
/// \p Ops will contain the operands needed to produce the flattened
@@ -261,12 +276,12 @@ class CombinerHelper {
///
/// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
bool matchCombineShuffleVector(MachineInstr &MI,
- SmallVectorImpl<Register> &Ops);
+ SmallVectorImpl<Register> &Ops) const;
/// Replace \p MI with a concat_vectors with \p Ops.
void applyCombineShuffleVector(MachineInstr &MI,
- const ArrayRef<Register> Ops);
- bool matchShuffleToExtract(MachineInstr &MI);
- void applyShuffleToExtract(MachineInstr &MI);
+ const ArrayRef<Register> Ops) const;
+ bool matchShuffleToExtract(MachineInstr &MI) const;
+ void applyShuffleToExtract(MachineInstr &MI) const;
/// Optimize memcpy intrinsics et al, e.g. constant len calls.
/// /p MaxLen if non-zero specifies the max length of a mem libcall to inline.
@@ -298,101 +313,105 @@ class CombinerHelper {
/// $addr = G_INDEXED_STORE $val, $base, $offset
/// [...]
/// $whatever = COPY $addr
- bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
+ bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0) const;
- bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
- void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
+ bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;
+ void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo) const;
/// Fold (shift (shift base, x), y) -> (shift base (x+y))
- bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
- void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
+ bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;
+ void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo) const;
/// If we have a shift-by-constant of a bitwise logic op that itself has a
/// shift-by-constant operand with identical opcode, we may be able to convert
/// that into 2 independent shifts followed by the logic op.
bool matchShiftOfShiftedLogic(MachineInstr &MI,
- ShiftOfShiftedLogic &MatchInfo);
+ ShiftOfShiftedLogic &MatchInfo) const;
void applyShiftOfShiftedLogic(MachineInstr &MI,
- ShiftOfShiftedLogic &MatchInfo);
+ ShiftOfShiftedLogic &MatchInfo) const;
- bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Transform a multiply by a power-of-2 value to a left shift.
- bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
- void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
+ bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
+ void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal) const;
// Transform a G_SUB with constant on the RHS to G_ADD.
- bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchCombineSubToAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
// Transform a G_SHL with an extended source into a narrower shift if
// possible.
- bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
+ bool matchCombineShlOfExtend(MachineInstr &MI,
+ RegisterImmPair &MatchData) const;
void applyCombineShlOfExtend(MachineInstr &MI,
- const RegisterImmPair &MatchData);
+ const RegisterImmPair &MatchData) const;
/// Fold away a merge of an unmerge of the corresponding values.
- bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo);
+ bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo) const;
/// Reduce a shift by a constant to an unmerge and a shift on a half sized
/// type. This will not produce a shift smaller than \p TargetShiftSize.
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
- unsigned &ShiftVal);
- void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
- bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);
+ unsigned &ShiftVal) const;
+ void applyCombineShiftToUnmerge(MachineInstr &MI,
+ const unsigned &ShiftVal) const;
+ bool tryCombineShiftToUnmerge(MachineInstr &MI,
+ unsigned TargetShiftAmount) const;
/// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
- bool
- matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
- SmallVectorImpl<Register> &Operands);
- void
- applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
- SmallVectorImpl<Register> &Operands);
+ bool matchCombineUnmergeMergeToPlainValues(
+ MachineInstr &MI, SmallVectorImpl<Register> &Operands) const;
+ void applyCombineUnmergeMergeToPlainValues(
+ MachineInstr &MI, SmallVectorImpl<Register> &Operands) const;
/// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
bool matchCombineUnmergeConstant(MachineInstr &MI,
- SmallVectorImpl<APInt> &Csts);
+ SmallVectorImpl<APInt> &Csts) const;
void applyCombineUnmergeConstant(MachineInstr &MI,
- SmallVectorImpl<APInt> &Csts);
+ SmallVectorImpl<APInt> &Csts) const;
/// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
- bool
- matchCombineUnmergeUndef(MachineInstr &MI,
- std::function<void(MachineIRBuilder &)> &MatchInfo);
+ bool matchCombineUnmergeUndef(
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const;
/// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
- bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
- void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
+ bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const;
+ void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) const;
/// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
- bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
- void applyCombineUnmergeZExtToZExt(MachineInstr &MI);
+ bool matchCombineUnmergeZExtToZExt(MachineInstr &MI) const;
+ void applyCombineUnmergeZExtToZExt(MachineInstr &MI) const;
/// Transform fp_instr(cst) to constant result of the fp operation.
- void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst);
+ void applyCombineConstantFoldFpUnary(MachineInstr &MI,
+ const ConstantFP *Cst) const;
/// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
- bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
- void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);
+ bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) const;
+ void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) const;
/// Transform PtrToInt(IntToPtr(x)) to x.
- void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);
+ void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) const;
/// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
/// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
- bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
- std::pair<Register, bool> &PtrRegAndCommute);
- void applyCombineAddP2IToPtrAdd(MachineInstr &MI,
- std::pair<Register, bool> &PtrRegAndCommute);
+ bool
+ matchCombineAddP2IToPtrAdd(MachineInstr &MI,
+ std::pair<Register, bool> &PtrRegAndCommute) const;
+ void
+ applyCombineAddP2IToPtrAdd(MachineInstr &MI,
+ std::pair<Register, bool> &PtrRegAndCommute) const;
// Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
- bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);
- void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);
+ bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const;
+ void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst) const;
/// Transform anyext(trunc(x)) to x.
- bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
+ bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) const;
/// Transform zext(trunc(x)) to x.
- bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg);
+ bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg) const;
/// Transform trunc (shl x, K) to shl (trunc x), K
/// if K < VT.getScalarSizeInBits().
@@ -401,118 +420,121 @@ class CombinerHelper {
/// if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
/// MidVT is obtained by finding a legal type between the trunc's src and dst
/// types.
- bool matchCombineTruncOfShift(MachineInstr &MI,
- std::pair<MachineInstr *, LLT> &MatchInfo);
- void applyCombineTruncOfShift(MachineInstr &MI,
- std::pair<MachineInstr *, LLT> &MatchInfo);
+ bool
+ matchCombineTruncOfShift(MachineInstr &MI,
+ std::pair<MachineInstr *, LLT> &MatchInfo) const;
+ void
+ applyCombineTruncOfShift(MachineInstr &MI,
+ std::pair<MachineInstr *, LLT> &MatchInfo) const;
/// Return true if any explicit use operand on \p MI is defined by a
/// G_IMPLICIT_DEF.
- bool matchAnyExplicitUseIsUndef(MachineInstr &MI);
+ bool matchAnyExplicitUseIsUndef(MachineInstr &MI) const;
/// Return true if all register explicit use operands on \p MI are defined by
/// a G_IMPLICIT_DEF.
- bool matchAllExplicitUsesAreUndef(MachineInstr &MI);
+ bool matchAllExplicitUsesAreUndef(MachineInstr &MI) const;
/// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
- bool matchUndefShuffleVectorMask(MachineInstr &MI);
+ bool matchUndefShuffleVectorMask(MachineInstr &MI) const;
/// Return true if a G_STORE instruction \p MI is storing an undef value.
- bool matchUndefStore(MachineInstr &MI);
+ bool matchUndefStore(MachineInstr &MI) const;
/// Return true if a G_SELECT instruction \p MI has an undef comparison.
- bool matchUndefSelectCmp(MachineInstr &MI);
+ bool matchUndefSelectCmp(MachineInstr &MI) const;
/// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
- bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI);
+ bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) const;
/// Return true if a G_SELECT instruction \p MI has a constant comparison. If
/// true, \p OpIdx will store the operand index of the known selected value.
- bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);
+ bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) const;
/// Replace an instruction with a G_FCONSTANT with value \p C.
- void replaceInstWithFConstant(MachineInstr &MI, double C);
+ void replaceInstWithFConstant(MachineInstr &MI, double C) const;
/// Replace an instruction with an G_FCONSTANT with value \p CFP.
- void replaceInstWithFConstant(MachineInstr &MI, ConstantFP *CFP);
+ void replaceInstWithFConstant(MachineInstr &MI, ConstantFP *CFP) const;
/// Replace an instruction with a G_CONSTANT with value \p C.
- void replaceInstWithConstant(MachineInstr &MI, int64_t C);
+ void replaceInstWithConstant(MachineInstr &MI, int64_t C) const;
/// Replace an instruction with a G_CONSTANT with value \p C.
- void replaceInstWithConstant(MachineInstr &MI, APInt C);
+ void replaceInstWithConstant(MachineInstr &MI, APInt C) const;
/// Replace an instruction with a G_IMPLICIT_DEF.
- void replaceInstWithUndef(MachineInstr &MI);
+ void replaceInstWithUndef(MachineInstr &MI) const;
/// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
- void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);
+ void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx) const;
/// Delete \p MI and replace all of its uses with \p Replacement.
- void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);
+ void replaceSingleDefInstWithReg(MachineInstr &MI,
+ Register Replacement) const;
/// @brief Replaces the shift amount in \p MI with ShiftAmt % BW
/// @param MI
- void applyFunnelShiftConstantModulo(MachineInstr &MI);
+ void applyFunnelShiftConstantModulo(MachineInstr &MI) const;
/// Return true if \p MOP1 and \p MOP2 are register operands are defined by
/// equivalent instructions.
- bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);
+ bool matchEqualDefs(const MachineOperand &MOP1,
+ const MachineOperand &MOP2) const;
/// Return true if \p MOP is defined by a G_CONSTANT or splat with a value equal to
/// \p C.
- bool matchConstantOp(const MachineOperand &MOP, int64_t C);
+ bool matchConstantOp(const MachineOperand &MOP, int64_t C) const;
/// Return true if \p MOP is defined by a G_FCONSTANT or splat with a value exactly
/// equal to \p C.
- bool matchConstantFPOp(const MachineOperand &MOP, double C);
+ bool matchConstantFPOp(const MachineOperand &MOP, double C) const;
/// @brief Checks if constant at \p ConstIdx is larger than \p MI 's bitwidth
/// @param ConstIdx Index of the constant
- bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx);
+ bool matchConstantLargerBitWidth(MachineInstr &MI, unsigned ConstIdx) const;
/// Optimize (cond ? x : x) -> x
- bool matchSelectSameVal(MachineInstr &MI);
+ bool matchSelectSameVal(MachineInstr &MI) const;
/// Optimize (x op x) -> x
- bool matchBinOpSameVal(MachineInstr &MI);
+ bool matchBinOpSameVal(MachineInstr &MI) const;
/// Check if operand \p OpIdx is zero.
- bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);
+ bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) const;
/// Check if operand \p OpIdx is undef.
- bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);
+ bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const;
/// Check if operand \p OpIdx is known to be a power of 2.
- bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);
+ bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
+ unsigned OpIdx) const;
/// Erase \p MI
- void eraseInst(MachineInstr &MI);
+ void eraseInst(MachineInstr &MI) const;
/// Return true if MI is a G_ADD which can be simplified to a G_SUB.
bool matchSimplifyAddToSub(MachineInstr &MI,
- std::tuple<Register, Register> &MatchInfo);
+ std::tuple<Register, Register> &MatchInfo) const;
void applySimplifyAddToSub(MachineInstr &MI,
- std::tuple<Register, Register> &MatchInfo);
+ std::tuple<Register, Register> &MatchInfo) const;
/// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
- bool
- matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
- InstructionStepsMatchInfo &MatchInfo);
+ bool matchHoistLogicOpWithSameOpcodeHands(
+ MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const;
/// Replace \p MI with a series of instructions described in \p MatchInfo.
void applyBuildInstructionSteps(MachineInstr &MI,
- InstructionStepsMatchInfo &MatchInfo);
+ InstructionStepsMatchInfo &MatchInfo) const;
/// Match ashr (shl x, C), C -> sext_inreg (C)
bool matchAshrShlToSextInreg(MachineInstr &MI,
- std::tuple<Register, int64_t> &MatchInfo);
+ std::tuple<Register, int64_t> &MatchInfo) const;
void applyAshShlToSextInreg(MachineInstr &MI,
- std::tuple<Register, int64_t> &MatchInfo);
+ std::tuple<Register, int64_t> &MatchInfo) const;
/// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
- bool matchOverlappingAnd(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ bool matchOverlappingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// \return true if \p MI is a G_AND instruction whose operands are x and y
/// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
@@ -520,7 +542,7 @@ class CombinerHelper {
/// \param [in] MI - The G_AND instruction.
/// \param [out] Replacement - A register the G_AND should be replaced with on
/// success.
- bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
+ bool matchRedundantAnd(MachineInstr &MI, Register &Replacement) const;
/// \return true if \p MI is a G_OR instruction whose operands are x and y
/// where x | y == x or x | y == y. (E.g., one of operands is all-zeros
@@ -529,42 +551,45 @@ class CombinerHelper {
/// \param [in] MI - The G_OR instruction.
/// \param [out] Replacement - A register the G_OR should be replaced with on
/// success.
- bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
+ bool matchRedundantOr(MachineInstr &MI, Register &Replacement) const;
/// \return true if \p MI is a G_SEXT_INREG that can be erased.
- bool matchRedundantSExtInReg(MachineInstr &MI);
+ bool matchRedundantSExtInReg(MachineInstr &MI) const;
/// Combine inverting a result of a compare into the opposite cond code.
- bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
- void applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+ bool matchNotCmp(MachineInstr &MI,
+ SmallVectorImpl<Register> &RegsToNegate) const;
+ void applyNotCmp(MachineInstr &MI,
+ SmallVectorImpl<Register> &RegsToNegate) const;
/// Fold (xor (and x, y), y) -> (and (not x), y)
///{
bool matchXorOfAndWithSameReg(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
+ std::pair<Register, Register> &MatchInfo) const;
void applyXorOfAndWithSameReg(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
+ std::pair<Register, Register> &MatchInfo) const;
///}
/// Combine G_PTR_ADD with nullptr to G_INTTOPTR
- bool matchPtrAddZero(MachineInstr &MI);
- void applyPtrAddZero(MachineInstr &MI);
+ bool matchPtrAddZero(MachineInstr &MI) const;
+ void applyPtrAddZero(MachineInstr &MI) const;
/// Combine G_UREM x, (known power of 2) to an add and bitmasking.
- void applySimplifyURemByPow2(MachineInstr &MI);
+ void applySimplifyURemByPow2(MachineInstr &MI) const;
/// Push a binary operator through a select on constants.
///
/// binop (select cond, K0, K1), K2 ->
/// select cond, (binop K0, K2), (binop K1, K2)
- bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo);
- void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo);
+ bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo) const;
+ void applyFoldBinOpIntoSelect(MachineInstr &MI,
+ const unsigned &SelectOpNo) const;
bool matchCombineInsertVecElts(MachineInstr &MI,
- SmallVectorImpl<Register> &MatchInfo);
+ SmallVectorImpl<Register> &MatchInfo) const;
void applyCombineInsertVecElts(MachineInstr &MI,
- SmallVectorImpl<Register> &MatchInfo);
+ SmallVectorImpl<Register> &MatchInfo) const;
/// Match expression trees of the form
///
@@ -575,145 +600,148 @@ class CombinerHelper {
///
/// And check if the tree can be replaced with a M-bit load + possibly a
/// bswap.
- bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo) const;
- bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI);
- void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI);
+ bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const;
+ void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI) const;
- bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg);
- void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg);
+ bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const;
+ void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg) const;
bool matchExtractAllEltsFromBuildVector(
MachineInstr &MI,
- SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);
+ SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;
void applyExtractAllEltsFromBuildVector(
MachineInstr &MI,
- SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);
+ SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo) const;
/// Use a function which takes in a MachineIRBuilder to perform a combine.
/// By default, it erases the instruction \p MI from the function.
- void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo);
+ void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Use a function which takes in a MachineIRBuilder to perform a combine.
/// This variant does not erase \p MI after calling the build function.
- void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo);
+ void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo) const;
- bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo);
- bool matchFunnelShiftToRotate(MachineInstr &MI);
- void applyFunnelShiftToRotate(MachineInstr &MI);
- bool matchRotateOutOfRange(MachineInstr &MI);
- void applyRotateOutOfRange(MachineInstr &MI);
+ bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo) const;
+ bool matchFunnelShiftToRotate(MachineInstr &MI) const;
+ void applyFunnelShiftToRotate(MachineInstr &MI) const;
+ bool matchRotateOutOfRange(MachineInstr &MI) const;
+ void applyRotateOutOfRange(MachineInstr &MI) const;
- bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo);
- void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo);
+ bool matchUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;
+ void applyUseVectorTruncate(MachineInstr &MI, Register &MatchInfo) const;
/// \returns true if a G_ICMP instruction \p MI can be replaced with a true
/// or false constant based off of KnownBits information.
- bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo);
+ bool matchICmpToTrueFalseKnownBits(MachineInstr &MI,
+ int64_t &MatchInfo) const;
/// \returns true if a G_ICMP \p MI can be replaced with its LHS based off of
/// KnownBits information.
- bool
- matchICmpToLHSKnownBits(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ bool matchICmpToLHSKnownBits(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
- bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;
bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
/// Match: and (lshr x, cst), mask -> ubfx x, cst, width
- bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchBitfieldExtractFromAnd(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
/// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
- bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchBitfieldExtractFromShr(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
/// Match: shr (and x, n), k -> ubfx x, pos, width
- bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchBitfieldExtractFromShrAnd(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
// Helpers for reassociation:
bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS,
MachineInstr *RHS,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS,
- MachineInstr *RHS, BuildFnTy &MatchInfo);
+ MachineInstr *RHS,
+ BuildFnTy &MatchInfo) const;
/// Reassociate pointer calculations with G_ADD involved, to allow better
/// addressing mode usage.
- bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Try to reassociate to reassociate operands of a commutative binop.
bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0,
- Register Op1, BuildFnTy &MatchInfo);
+ Register Op1, BuildFnTy &MatchInfo) const;
/// Reassociate commutative binary operations like G_ADD.
- bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Do constant folding when opportunities are exposed after MIR building.
- bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo);
+ bool matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) const;
/// Do constant folding when opportunities are exposed after MIR building.
- bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo);
+ bool matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) const;
/// Do constant FP folding when opportunities are exposed after MIR building.
- bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP* &MatchInfo);
+ bool matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP *&MatchInfo) const;
/// Constant fold G_FMA/G_FMAD.
- bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo);
+ bool matchConstantFoldFMA(MachineInstr &MI, ConstantFP *&MatchInfo) const;
/// \returns true if it is possible to narrow the width of a scalar binop
/// feeding a G_AND instruction \p MI.
- bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Given an G_UDIV \p MI expressing a divide by constant, return an
/// expression that implements it by multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
- MachineInstr *buildUDivUsingMul(MachineInstr &MI);
+ MachineInstr *buildUDivUsingMul(MachineInstr &MI) const;
/// Combine G_UDIV by constant into a multiply by magic constant.
- bool matchUDivByConst(MachineInstr &MI);
- void applyUDivByConst(MachineInstr &MI);
+ bool matchUDivByConst(MachineInstr &MI) const;
+ void applyUDivByConst(MachineInstr &MI) const;
/// Given an G_SDIV \p MI expressing a signed divide by constant, return an
/// expression that implements it by multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
- MachineInstr *buildSDivUsingMul(MachineInstr &MI);
- bool matchSDivByConst(MachineInstr &MI);
- void applySDivByConst(MachineInstr &MI);
+ MachineInstr *buildSDivUsingMul(MachineInstr &MI) const;
+ bool matchSDivByConst(MachineInstr &MI) const;
+ void applySDivByConst(MachineInstr &MI) const;
/// Given an G_SDIV \p MI expressing a signed divided by a pow2 constant,
/// return expressions that implements it by shifting.
- bool matchDivByPow2(MachineInstr &MI, bool IsSigned);
- void applySDivByPow2(MachineInstr &MI);
+ bool matchDivByPow2(MachineInstr &MI, bool IsSigned) const;
+ void applySDivByPow2(MachineInstr &MI) const;
/// Given an G_UDIV \p MI expressing an unsigned divided by a pow2 constant,
/// return expressions that implements it by shifting.
- void applyUDivByPow2(MachineInstr &MI);
+ void applyUDivByPow2(MachineInstr &MI) const;
// G_UMULH x, (1 << c)) -> x >> (bitwidth - c)
- bool matchUMulHToLShr(MachineInstr &MI);
- void applyUMulHToLShr(MachineInstr &MI);
+ bool matchUMulHToLShr(MachineInstr &MI) const;
+ void applyUMulHToLShr(MachineInstr &MI) const;
/// Try to transform \p MI by using all of the above
/// combine functions. Returns true if changed.
- bool tryCombine(MachineInstr &MI);
+ bool tryCombine(MachineInstr &MI) const;
/// Emit loads and stores that perform the given memcpy.
/// Assumes \p MI is a G_MEMCPY_INLINE
/// TODO: implement dynamically sized inline memcpy,
/// and rename: s/bool tryEmit/void emit/
- bool tryEmitMemcpyInline(MachineInstr &MI);
+ bool tryEmitMemcpyInline(MachineInstr &MI) const;
/// Match:
/// (G_UMULO x, 2) -> (G_UADDO x, x)
/// (G_SMULO x, 2) -> (G_SADDO x, x)
- bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Match:
/// (G_*MULO x, 0) -> 0 + no carry out
- bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Match:
/// (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
/// (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
- bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Transform (fadd x, fneg(y)) -> (fsub x, y)
/// (fadd fneg(x), y) -> (fsub y, x)
@@ -722,79 +750,85 @@ class CombinerHelper {
/// (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
/// (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
/// (fma fneg(x), fneg(y), z) -> (fma x, y, z)
- bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo) const;
- bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo);
- void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo);
+ bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;
+ void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) const;
bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
bool &HasFMAD, bool &Aggressive,
- bool CanReassociate = false);
+ bool CanReassociate = false) const;
/// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
/// (fadd (fmul x, y), z) -> (fmad x, y, z)
- bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
/// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
/// (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
/// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
/// (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
// Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
// -> (fma x, y, (fma (fpext u), (fpext v), z))
// (fadd (fmad x, y, (fpext (fmul u, v))), z)
// -> (fmad x, y, (fmad (fpext u), (fpext v), z))
- bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ bool
+ matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
/// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
/// (fsub (fmul x, y), z) -> (fmad x, y, -z)
- bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
/// Transform (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z))
/// (fsub (fneg (fmul, x, y)), z) -> (fmad (fneg x), y, (fneg z))
bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
/// Transform (fsub (fpext (fmul x, y)), z)
/// -> (fma (fpext x), (fpext y), (fneg z))
/// (fsub (fpext (fmul x, y)), z)
/// -> (fmad (fpext x), (fpext y), (fneg z))
bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
/// Transform (fsub (fpext (fneg (fmul x, y))), z)
/// -> (fneg (fma (fpext x), (fpext y), z))
/// (fsub (fpext (fneg (fmul x, y))), z)
/// -> (fneg (fmad (fpext x), (fpext y), z))
bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
- bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info);
+ bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info) const;
/// Transform G_ADD(x, G_SUB(y, x)) to y.
/// Transform G_ADD(G_SUB(y, x), x) to y.
- bool matchAddSubSameReg(MachineInstr &MI, Register &Src);
+ bool matchAddSubSameReg(MachineInstr &MI, Register &Src) const;
- bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo);
- bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo);
- bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo);
+ bool matchBuildVectorIdentityFold(MachineInstr &MI,
+ Register &MatchInfo) const;
+ bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo) const;
+ bool matchTruncLshrBuildVectorFold(MachineInstr &MI,
+ Register &MatchInfo) const;
/// Transform:
/// (x + y) - y -> x
/// (x + y) - x -> y
/// x - (y + x) -> 0 - y
/// x - (x + z) -> 0 - z
- bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// \returns true if it is possible to simplify a select instruction \p MI
/// to a min/max instruction of some sort.
- bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchSimplifySelectToMinMax(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
/// Transform:
/// (X + Y) == X -> Y == 0
@@ -803,144 +837,157 @@ class CombinerHelper {
/// (X + Y) != X -> Y != 0
/// (X - Y) != X -> Y != 0
/// (X ^ Y) != X -> Y != 0
- bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchRedundantBinOpInEquality(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
/// Match shifts greater or equal to the bitwidth of the operation.
- bool matchShiftsTooBig(MachineInstr &MI);
+ bool matchShiftsTooBig(MachineInstr &MI) const;
/// Match constant LHS ops that should be commuted.
- bool matchCommuteConstantToRHS(MachineInstr &MI);
+ bool matchCommuteConstantToRHS(MachineInstr &MI) const;
/// Combine sext of trunc.
- bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo);
+ bool matchSextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
/// Combine zext of trunc.
- bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo);
+ bool matchZextOfTrunc(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
/// Combine zext nneg to sext.
- bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo);
+ bool matchNonNegZext(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
/// Match constant LHS FP ops that should be commuted.
- bool matchCommuteFPConstantToRHS(MachineInstr &MI);
+ bool matchCommuteFPConstantToRHS(MachineInstr &MI) const;
// Given a binop \p MI, commute operands 1 and 2.
- void applyCommuteBinOpOperands(MachineInstr &MI);
+ void applyCommuteBinOpOperands(MachineInstr &MI) const;
/// Combine select to integer min/max.
- bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo);
+ bool matchSelectIMinMax(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
/// Combine selects.
- bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Combine ands.
- bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Combine ors.
- bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// trunc (binop X, C) --> binop (trunc X, trunc C).
bool matchNarrowBinop(const MachineInstr &TruncMI,
- const MachineInstr &BinopMI, BuildFnTy &MatchInfo);
+ const MachineInstr &BinopMI,
+ BuildFnTy &MatchInfo) const;
- bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo);
+ bool matchCastOfInteger(const MachineInstr &CastMI, APInt &MatchInfo) const;
/// Combine addos.
- bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Combine extract vector element.
- bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchExtractVectorElement(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Combine extract vector element with a build vector on the vector register.
bool matchExtractVectorElementWithBuildVector(const MachineInstr &MI,
const MachineInstr &MI2,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
/// Combine extract vector element with a build vector trunc on the vector
/// register.
- bool matchExtractVectorElementWithBuildVectorTrunc(const MachineOperand &MO,
- BuildFnTy &MatchInfo);
+ bool
+ matchExtractVectorElementWithBuildVectorTrunc(const MachineOperand &MO,
+ BuildFnTy &MatchInfo) const;
/// Combine extract vector element with a shuffle vector on the vector
/// register.
bool matchExtractVectorElementWithShuffleVector(const MachineInstr &MI,
const MachineInstr &MI2,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
/// Combine extract vector element with a insert vector element on the vector
/// register and different indices.
- bool matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO,
- BuildFnTy &MatchInfo);
+ bool
+ matchExtractVectorElementWithDifferentIndices(const MachineOperand &MO,
+ BuildFnTy &MatchInfo) const;
/// Remove references to rhs if it is undef
- bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchShuffleUndefRHS(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Turn shuffle a, b, mask -> shuffle undef, b, mask iff mask does not
/// reference a.
- bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchShuffleDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo) const;
/// Use a function which takes in a MachineIRBuilder to perform a combine.
/// By default, it erases the instruction def'd on \p MO from the function.
- void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo);
+ void applyBuildFnMO(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
/// Match FPOWI if it's safe to extend it into a series of multiplications.
- bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent);
+ bool matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) const;
/// Expands FPOWI into a series of multiplications and a division if the
/// exponent is negative.
- void applyExpandFPowI(MachineInstr &MI, int64_t Exponent);
+ void applyExpandFPowI(MachineInstr &MI, int64_t Exponent) const;
/// Combine insert vector element OOB.
- bool matchInsertVectorElementOOB(MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchInsertVectorElementOOB(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
bool matchFreezeOfSingleMaybePoisonOperand(MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
- bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
+ bool matchAddOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
- bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
+ bool matchMulOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
- bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
+ bool matchSubOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
- bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo);
+ bool matchShlOfVScale(const MachineOperand &MO, BuildFnTy &MatchInfo) const;
/// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
bool matchTruncateOfExt(const MachineInstr &Root, const MachineInstr &ExtMI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
bool matchCastOfSelect(const MachineInstr &Cast, const MachineInstr &SelectMI,
- BuildFnTy &MatchInfo);
- bool matchFoldAPlusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
+ bool matchFoldAPlusC1MinusC2(const MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
- bool matchFoldC2MinusAPlusC1(const MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchFoldC2MinusAPlusC1(const MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
- bool matchFoldAMinusC1MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchFoldAMinusC1MinusC2(const MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
- bool matchFoldC1Minus2MinusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchFoldC1Minus2MinusC2(const MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
// fold ((A-C1)+C2) -> (A+(C2-C1))
- bool matchFoldAMinusC1PlusC2(const MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchFoldAMinusC1PlusC2(const MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
bool matchExtOfExt(const MachineInstr &FirstMI, const MachineInstr &SecondMI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
bool matchCastOfBuildVector(const MachineInstr &CastMI,
- const MachineInstr &BVMI, BuildFnTy &MatchInfo);
+ const MachineInstr &BVMI,
+ BuildFnTy &MatchInfo) const;
- bool matchCanonicalizeICmp(const MachineInstr &MI, BuildFnTy &MatchInfo);
- bool matchCanonicalizeFCmp(const MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchCanonicalizeICmp(const MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
+ bool matchCanonicalizeFCmp(const MachineInstr &MI,
+ BuildFnTy &MatchInfo) const;
// unmerge_values(anyext(build vector)) -> build vector(anyext)
bool matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
// merge_values(_, undef) -> anyext
- bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchMergeXAndUndef(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
// merge_values(_, zero) -> zext
- bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchMergeXAndZero(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
// overflow sub
- bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo);
+ bool matchSuboCarryOut(const MachineInstr &MI, BuildFnTy &MatchInfo) const;
private:
/// Checks for legality of an indexed variant of \p LdSt.
@@ -950,14 +997,14 @@ class CombinerHelper {
///
/// \returns true if a candidate is found.
bool findPostIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
- Register &Offset, bool &RematOffset);
+ Register &Offset, bool &RematOffset) const;
/// Given a non-indexed load or store instruction \p MI, find an offset that
/// can be usefully and legally folded into it as a pre-indexing operation.
///
/// \returns true if a candidate is found.
bool findPreIndexCandidate(GLoadStore &MI, Register &Addr, Register &Base,
- Register &Offset);
+ Register &Offset) const;
/// Helper function for matchLoadOrCombine. Searches for Registers
/// which may have been produced by a load instruction + some arithmetic.
@@ -983,12 +1030,12 @@ class CombinerHelper {
findLoadOffsetsForLoadOrCombine(
SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
const SmallVector<Register, 8> &RegsToVisit,
- const unsigned MemSizeInBits);
+ const unsigned MemSizeInBits) const;
/// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
/// a re-association of its operands would break an existing legal addressing
/// mode that the address computation currently represents.
- bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd);
+ bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd) const;
/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
@@ -1031,36 +1078,36 @@ class CombinerHelper {
/// select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
/// select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
- Register FalseVal, BuildFnTy &MatchInfo);
+ Register FalseVal, BuildFnTy &MatchInfo) const;
/// Try to fold selects to logical operations.
- bool tryFoldBoolSelectToLogic(GSelect *Select, BuildFnTy &MatchInfo);
+ bool tryFoldBoolSelectToLogic(GSelect *Select, BuildFnTy &MatchInfo) const;
- bool tryFoldSelectOfConstants(GSelect *Select, BuildFnTy &MatchInfo);
+ bool tryFoldSelectOfConstants(GSelect *Select, BuildFnTy &MatchInfo) const;
- bool isOneOrOneSplat(Register Src, bool AllowUndefs);
- bool isZeroOrZeroSplat(Register Src, bool AllowUndefs);
+ bool isOneOrOneSplat(Register Src, bool AllowUndefs) const;
+ bool isZeroOrZeroSplat(Register Src, bool AllowUndefs) const;
bool isConstantSplatVector(Register Src, int64_t SplatValue,
- bool AllowUndefs);
+ bool AllowUndefs) const;
bool isConstantOrConstantVectorI(Register Src) const;
- std::optional<APInt> getConstantOrConstantSplatVector(Register Src);
+ std::optional<APInt> getConstantOrConstantSplatVector(Register Src) const;
/// Fold (icmp Pred1 V1, C1) && (icmp Pred2 V2, C2)
/// or (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
bool tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
- BuildFnTy &MatchInfo);
+ BuildFnTy &MatchInfo) const;
// Simplify (cmp cc0 x, y) (&& or ||) (cmp cc1 x, y) -> cmp cc2 x, y.
- bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo);
+ bool tryFoldLogicOfFCmps(GLogicalBinOp *Logic, BuildFnTy &MatchInfo) const;
bool isCastFree(unsigned Opcode, LLT ToTy, LLT FromTy) const;
bool constantFoldICmp(const GICmp &ICmp, const GIConstant &LHSCst,
- const GIConstant &RHSCst, BuildFnTy &MatchInfo);
+ const GIConstant &RHSCst, BuildFnTy &MatchInfo) const;
bool constantFoldFCmp(const GFCmp &FCmp, const GFConstant &LHSCst,
- const GFConstant &RHSCst, BuildFnTy &MatchInfo);
+ const GFConstant &RHSCst, BuildFnTy &MatchInfo) const;
};
} // namespace llvm
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index a2737995446526..eadc0487830918 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -207,26 +207,27 @@ const RegisterBank *CombinerHelper::getRegBank(Register Reg) const {
return RBI->getRegBank(Reg, MRI, *TRI);
}
-void CombinerHelper::setRegBank(Register Reg, const RegisterBank *RegBank) {
+void CombinerHelper::setRegBank(Register Reg,
+ const RegisterBank *RegBank) const {
if (RegBank)
MRI.setRegBank(Reg, *RegBank);
}
-bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
+bool CombinerHelper::tryCombineCopy(MachineInstr &MI) const {
if (matchCombineCopy(MI)) {
applyCombineCopy(MI);
return true;
}
return false;
}
-bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
+bool CombinerHelper::matchCombineCopy(MachineInstr &MI) const {
if (MI.getOpcode() != TargetOpcode::COPY)
return false;
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
return canReplaceReg(DstReg, SrcReg, MRI);
}
-void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
+void CombinerHelper::applyCombineCopy(MachineInstr &MI) const {
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
replaceRegWith(MRI, DstReg, SrcReg);
@@ -234,7 +235,7 @@ void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
}
bool CombinerHelper::matchFreezeOfSingleMaybePoisonOperand(
- MachineInstr &MI, BuildFnTy &MatchInfo) {
+ MachineInstr &MI, BuildFnTy &MatchInfo) const {
// Ported from InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating.
Register DstOp = MI.getOperand(0).getReg();
Register OrigOp = MI.getOperand(1).getReg();
@@ -303,8 +304,8 @@ bool CombinerHelper::matchFreezeOfSingleMaybePoisonOperand(
return true;
}
-bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI,
- SmallVector<Register> &Ops) {
+bool CombinerHelper::matchCombineConcatVectors(
+ MachineInstr &MI, SmallVector<Register> &Ops) const {
assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
"Invalid instruction");
bool IsUndef = true;
@@ -361,8 +362,8 @@ bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI,
return true;
}
-void CombinerHelper::applyCombineConcatVectors(MachineInstr &MI,
- SmallVector<Register> &Ops) {
+void CombinerHelper::applyCombineConcatVectors(
+ MachineInstr &MI, SmallVector<Register> &Ops) const {
// We determined that the concat_vectors can be flatten.
// Generate the flattened build_vector.
Register DstReg = MI.getOperand(0).getReg();
@@ -383,8 +384,8 @@ void CombinerHelper::applyCombineConcatVectors(MachineInstr &MI,
MI.eraseFromParent();
}
-bool CombinerHelper::matchCombineShuffleConcat(MachineInstr &MI,
- SmallVector<Register> &Ops) {
+bool CombinerHelper::matchCombineShuffleConcat(
+ MachineInstr &MI, SmallVector<Register> &Ops) const {
ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
auto ConcatMI1 =
dyn_cast<GConcatVectors>(MRI.getVRegDef(MI.getOperand(1).getReg()));
@@ -443,8 +444,8 @@ bool CombinerHelper::matchCombineShuffleConcat(MachineInstr &MI,
return !Ops.empty();
}
-void CombinerHelper::applyCombineShuffleConcat(MachineInstr &MI,
- SmallVector<Register> &Ops) {
+void CombinerHelper::applyCombineShuffleConcat(
+ MachineInstr &MI, SmallVector<Register> &Ops) const {
LLT SrcTy;
for (Register &Reg : Ops) {
if (Reg != 0)
@@ -469,7 +470,7 @@ void CombinerHelper::applyCombineShuffleConcat(MachineInstr &MI,
MI.eraseFromParent();
}
-bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
+bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) const {
SmallVector<Register, 4> Ops;
if (matchCombineShuffleVector(MI, Ops)) {
applyCombineShuffleVector(MI, Ops);
@@ -478,8 +479,8 @@ bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
return false;
}
-bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
- SmallVectorImpl<Register> &Ops) {
+bool CombinerHelper::matchCombineShuffleVector(
+ MachineInstr &MI, SmallVectorImpl<Register> &Ops) const {
assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
"Invalid instruction kind");
LLT DstType = MRI.getType(MI.getOperand(0).getReg());
@@ -554,8 +555,8 @@ bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
return true;
}
-void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
- const ArrayRef<Register> Ops) {
+void CombinerHelper::applyCombineShuffleVector(
+ MachineInstr &MI, const ArrayRef<Register> Ops) const {
Register DstReg = MI.getOperand(0).getReg();
Builder.setInsertPt(*MI.getParent(), MI);
Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
@@ -569,7 +570,7 @@ void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
MI.eraseFromParent();
}
-bool CombinerHelper::matchShuffleToExtract(MachineInstr &MI) {
+bool CombinerHelper::matchShuffleToExtract(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
"Invalid instruction kind");
@@ -577,7 +578,7 @@ bool CombinerHelper::matchShuffleToExtract(MachineInstr &MI) {
return Mask.size() == 1;
}
-void CombinerHelper::applyShuffleToExtract(MachineInstr &MI) {
+void CombinerHelper::applyShuffleToExtract(MachineInstr &MI) const {
Register DstReg = MI.getOperand(0).getReg();
Builder.setInsertPt(*MI.getParent(), MI);
@@ -690,7 +691,7 @@ static void InsertInsnsWithoutSideEffectsBeforeUse(
}
} // end anonymous namespace
-bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
+bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) const {
PreferredTuple Preferred;
if (matchCombineExtendingLoads(MI, Preferred)) {
applyCombineExtendingLoads(MI, Preferred);
@@ -717,8 +718,8 @@ static unsigned getExtLoadOpcForExtend(unsigned ExtOpc) {
return CandidateLoadOpc;
}
-bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
- PreferredTuple &Preferred) {
+bool CombinerHelper::matchCombineExtendingLoads(
+ MachineInstr &MI, PreferredTuple &Preferred) const {
// We match the loads and follow the uses to the extend instead of matching
// the extends and following the def to the load. This is because the load
// must remain in the same position for correctness (unless we also add code
@@ -793,8 +794,8 @@ bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
return true;
}
-void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
- PreferredTuple &Preferred) {
+void CombinerHelper::applyCombineExtendingLoads(
+ MachineInstr &MI, PreferredTuple &Preferred) const {
// Rewrite the load to the chosen extending load.
Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
@@ -900,7 +901,7 @@ void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
}
bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_AND);
// If we have the following code:
@@ -982,7 +983,7 @@ bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
}
bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
- const MachineInstr &UseMI) {
+ const MachineInstr &UseMI) const {
assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
"shouldn't consider debug uses");
assert(DefMI.getParent() == UseMI.getParent());
@@ -998,7 +999,7 @@ bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
}
bool CombinerHelper::dominates(const MachineInstr &DefMI,
- const MachineInstr &UseMI) {
+ const MachineInstr &UseMI) const {
assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
"shouldn't consider debug uses");
if (MDT)
@@ -1009,7 +1010,7 @@ bool CombinerHelper::dominates(const MachineInstr &DefMI,
return isPredecessor(DefMI, UseMI);
}
-bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
+bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
Register SrcReg = MI.getOperand(1).getReg();
Register LoadUser = SrcReg;
@@ -1036,14 +1037,14 @@ bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
return false;
}
-void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
+void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
MI.eraseFromParent();
}
bool CombinerHelper::matchSextInRegOfLoad(
- MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
+ MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
Register DstReg = MI.getOperand(0).getReg();
@@ -1095,7 +1096,7 @@ bool CombinerHelper::matchSextInRegOfLoad(
}
void CombinerHelper::applySextInRegOfLoad(
- MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
+ MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
Register LoadReg;
unsigned ScalarSizeBits;
@@ -1185,7 +1186,7 @@ static cl::opt<unsigned> PostIndexUseThreshold(
bool CombinerHelper::findPostIndexCandidate(GLoadStore &LdSt, Register &Addr,
Register &Base, Register &Offset,
- bool &RematOffset) {
+ bool &RematOffset) const {
// We're looking for the following pattern, for either load or store:
// %baseptr:_(p0) = ...
// G_STORE %val(s64), %baseptr(p0)
@@ -1280,7 +1281,8 @@ bool CombinerHelper::findPostIndexCandidate(GLoadStore &LdSt, Register &Addr,
}
bool CombinerHelper::findPreIndexCandidate(GLoadStore &LdSt, Register &Addr,
- Register &Base, Register &Offset) {
+ Register &Base,
+ Register &Offset) const {
auto &MF = *LdSt.getParent()->getParent();
const auto &TLI = *MF.getSubtarget().getTargetLowering();
@@ -1335,8 +1337,8 @@ bool CombinerHelper::findPreIndexCandidate(GLoadStore &LdSt, Register &Addr,
return RealUse;
}
-bool CombinerHelper::matchCombineExtractedVectorLoad(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchCombineExtractedVectorLoad(
+ MachineInstr &MI, BuildFnTy &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
// Check if there is a load that defines the vector being extracted from.
@@ -1442,7 +1444,7 @@ bool CombinerHelper::matchCombineExtractedVectorLoad(MachineInstr &MI,
}
bool CombinerHelper::matchCombineIndexedLoadStore(
- MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
+ MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const {
auto &LdSt = cast<GLoadStore>(MI);
if (LdSt.isAtomic())
@@ -1459,7 +1461,7 @@ bool CombinerHelper::matchCombineIndexedLoadStore(
}
void CombinerHelper::applyCombineIndexedLoadStore(
- MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
+ MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) const {
MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
unsigned Opcode = MI.getOpcode();
bool IsStore = Opcode == TargetOpcode::G_STORE;
@@ -1494,7 +1496,7 @@ void CombinerHelper::applyCombineIndexedLoadStore(
}
bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
- MachineInstr *&OtherMI) {
+ MachineInstr *&OtherMI) const {
unsigned Opcode = MI.getOpcode();
bool IsDiv, IsSigned;
@@ -1557,7 +1559,7 @@ bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
}
void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
- MachineInstr *&OtherMI) {
+ MachineInstr *&OtherMI) const {
unsigned Opcode = MI.getOpcode();
assert(OtherMI && "OtherMI shouldn't be empty.");
@@ -1588,8 +1590,8 @@ void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
OtherMI->eraseFromParent();
}
-bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI,
- MachineInstr *&BrCond) {
+bool CombinerHelper::matchOptBrCondByInvertingCond(
+ MachineInstr &MI, MachineInstr *&BrCond) const {
assert(MI.getOpcode() == TargetOpcode::G_BR);
// Try to match the following:
@@ -1622,8 +1624,8 @@ bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI,
MBB->isLayoutSuccessor(BrCondTarget);
}
-void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI,
- MachineInstr *&BrCond) {
+void CombinerHelper::applyOptBrCondByInvertingCond(
+ MachineInstr &MI, MachineInstr *&BrCond) const {
MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
Builder.setInstrAndDebugLoc(*BrCond);
LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
@@ -1647,8 +1649,7 @@ void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI,
Observer.changedInstr(*BrCond);
}
-
-bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) {
+bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) const {
MachineIRBuilder HelperBuilder(MI);
GISelObserverWrapper DummyObserver;
LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
@@ -1656,7 +1657,8 @@ bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) {
LegalizerHelper::LegalizeResult::Legalized;
}
-bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
+bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI,
+ unsigned MaxLen) const {
MachineIRBuilder HelperBuilder(MI);
GISelObserverWrapper DummyObserver;
LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
@@ -1709,8 +1711,8 @@ static APFloat constantFoldFpUnary(const MachineInstr &MI,
return Result;
}
-void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
- const ConstantFP *Cst) {
+void CombinerHelper::applyCombineConstantFoldFpUnary(
+ MachineInstr &MI, const ConstantFP *Cst) const {
APFloat Folded = constantFoldFpUnary(MI, MRI, Cst->getValue());
const ConstantFP *NewCst = ConstantFP::get(Builder.getContext(), Folded);
Builder.buildFConstant(MI.getOperand(0), *NewCst);
@@ -1718,7 +1720,7 @@ void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
}
bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
- PtrAddChain &MatchInfo) {
+ PtrAddChain &MatchInfo) const {
// We're trying to match the following pattern:
// %t1 = G_PTR_ADD %base, G_CONSTANT imm1
// %root = G_PTR_ADD %t1, G_CONSTANT imm2
@@ -1780,7 +1782,7 @@ bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
}
void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
- PtrAddChain &MatchInfo) {
+ PtrAddChain &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
MachineIRBuilder MIB(MI);
LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
@@ -1793,7 +1795,7 @@ void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
}
bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
- RegisterImmPair &MatchInfo) {
+ RegisterImmPair &MatchInfo) const {
// We're trying to match the following pattern with any of
// G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
// %t1 = SHIFT %base, G_CONSTANT imm1
@@ -1838,7 +1840,7 @@ bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
}
void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
- RegisterImmPair &MatchInfo) {
+ RegisterImmPair &MatchInfo) const {
unsigned Opcode = MI.getOpcode();
assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
@@ -1869,8 +1871,8 @@ void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
Observer.changedInstr(MI);
}
-bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
- ShiftOfShiftedLogic &MatchInfo) {
+bool CombinerHelper::matchShiftOfShiftedLogic(
+ MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const {
// We're trying to match the following pattern with any of
// G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
// with any of G_AND/G_OR/G_XOR logic instructions.
@@ -1950,8 +1952,8 @@ bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
return true;
}
-void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
- ShiftOfShiftedLogic &MatchInfo) {
+void CombinerHelper::applyShiftOfShiftedLogic(
+ MachineInstr &MI, ShiftOfShiftedLogic &MatchInfo) const {
unsigned Opcode = MI.getOpcode();
assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
@@ -1989,7 +1991,8 @@ void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
MI.eraseFromParent();
}
-bool CombinerHelper::matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchCommuteShift(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");
// Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
// Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
@@ -2025,7 +2028,7 @@ bool CombinerHelper::matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) {
}
bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
- unsigned &ShiftVal) {
+ unsigned &ShiftVal) const {
assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
auto MaybeImmVal =
getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
@@ -2037,7 +2040,7 @@ bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
}
void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
- unsigned &ShiftVal) {
+ unsigned &ShiftVal) const {
assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
MachineIRBuilder MIB(MI);
LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
@@ -2051,7 +2054,7 @@ void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
}
bool CombinerHelper::matchCombineSubToAdd(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GSub &Sub = cast<GSub>(MI);
LLT Ty = MRI.getType(Sub.getReg(0));
@@ -2077,7 +2080,7 @@ bool CombinerHelper::matchCombineSubToAdd(MachineInstr &MI,
// shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
- RegisterImmPair &MatchData) {
+ RegisterImmPair &MatchData) const {
assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
if (!getTargetLowering().isDesirableToPullExtFromShl(MI))
return false;
@@ -2116,8 +2119,8 @@ bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;
}
-void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
- const RegisterImmPair &MatchData) {
+void CombinerHelper::applyCombineShlOfExtend(
+ MachineInstr &MI, const RegisterImmPair &MatchData) const {
Register ExtSrcReg = MatchData.Reg;
int64_t ShiftAmtVal = MatchData.Imm;
@@ -2130,7 +2133,7 @@ void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
}
bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
- Register &MatchInfo) {
+ Register &MatchInfo) const {
GMerge &Merge = cast<GMerge>(MI);
SmallVector<Register, 16> MergedValues;
for (unsigned I = 0; I < Merge.getNumSources(); ++I)
@@ -2157,7 +2160,7 @@ static Register peekThroughBitcast(Register Reg,
}
bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
- MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
+ MachineInstr &MI, SmallVectorImpl<Register> &Operands) const {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
"Expected an unmerge");
auto &Unmerge = cast<GUnmerge>(MI);
@@ -2181,7 +2184,7 @@ bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
}
void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
- MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
+ MachineInstr &MI, SmallVectorImpl<Register> &Operands) const {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
"Expected an unmerge");
assert((MI.getNumOperands() - 1 == Operands.size()) &&
@@ -2211,8 +2214,8 @@ void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
MI.eraseFromParent();
}
-bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
- SmallVectorImpl<APInt> &Csts) {
+bool CombinerHelper::matchCombineUnmergeConstant(
+ MachineInstr &MI, SmallVectorImpl<APInt> &Csts) const {
unsigned SrcIdx = MI.getNumOperands() - 1;
Register SrcReg = MI.getOperand(SrcIdx).getReg();
MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
@@ -2236,8 +2239,8 @@ bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
return true;
}
-void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
- SmallVectorImpl<APInt> &Csts) {
+void CombinerHelper::applyCombineUnmergeConstant(
+ MachineInstr &MI, SmallVectorImpl<APInt> &Csts) const {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
"Expected an unmerge");
assert((MI.getNumOperands() - 1 == Csts.size()) &&
@@ -2252,7 +2255,8 @@ void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
}
bool CombinerHelper::matchCombineUnmergeUndef(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
unsigned SrcIdx = MI.getNumOperands() - 1;
Register SrcReg = MI.getOperand(SrcIdx).getReg();
MatchInfo = [&MI](MachineIRBuilder &B) {
@@ -2265,7 +2269,8 @@ bool CombinerHelper::matchCombineUnmergeUndef(
return isa<GImplicitDef>(MRI.getVRegDef(SrcReg));
}
-bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
+bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(
+ MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
"Expected an unmerge");
if (MRI.getType(MI.getOperand(0).getReg()).isVector() ||
@@ -2279,14 +2284,15 @@ bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
return true;
}
-void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
+void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(
+ MachineInstr &MI) const {
Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
Register Dst0Reg = MI.getOperand(0).getReg();
Builder.buildTrunc(Dst0Reg, SrcReg);
MI.eraseFromParent();
}
-bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
+bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
"Expected an unmerge");
Register Dst0Reg = MI.getOperand(0).getReg();
@@ -2312,7 +2318,7 @@ bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
}
-void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
+void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
"Expected an unmerge");
@@ -2346,7 +2352,7 @@ void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
unsigned TargetShiftSize,
- unsigned &ShiftVal) {
+ unsigned &ShiftVal) const {
assert((MI.getOpcode() == TargetOpcode::G_SHL ||
MI.getOpcode() == TargetOpcode::G_LSHR ||
MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
@@ -2369,8 +2375,8 @@ bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
return ShiftVal >= Size / 2 && ShiftVal < Size;
}
-void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
- const unsigned &ShiftVal) {
+void CombinerHelper::applyCombineShiftToUnmerge(
+ MachineInstr &MI, const unsigned &ShiftVal) const {
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
LLT Ty = MRI.getType(SrcReg);
@@ -2441,8 +2447,8 @@ void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
MI.eraseFromParent();
}
-bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
- unsigned TargetShiftAmount) {
+bool CombinerHelper::tryCombineShiftToUnmerge(
+ MachineInstr &MI, unsigned TargetShiftAmount) const {
unsigned ShiftAmt;
if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
applyCombineShiftToUnmerge(MI, ShiftAmt);
@@ -2452,7 +2458,8 @@ bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
return false;
}
-bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
+bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI,
+ Register &Reg) const {
assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
@@ -2461,14 +2468,16 @@ bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
}
-void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
+void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI,
+ Register &Reg) const {
assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
Register DstReg = MI.getOperand(0).getReg();
Builder.buildCopy(DstReg, Reg);
MI.eraseFromParent();
}
-void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
+void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI,
+ Register &Reg) const {
assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
Register DstReg = MI.getOperand(0).getReg();
Builder.buildZExtOrTrunc(DstReg, Reg);
@@ -2476,7 +2485,7 @@ void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
}
bool CombinerHelper::matchCombineAddP2IToPtrAdd(
- MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
+ MachineInstr &MI, std::pair<Register, bool> &PtrReg) const {
assert(MI.getOpcode() == TargetOpcode::G_ADD);
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
@@ -2501,7 +2510,7 @@ bool CombinerHelper::matchCombineAddP2IToPtrAdd(
}
void CombinerHelper::applyCombineAddP2IToPtrAdd(
- MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
+ MachineInstr &MI, std::pair<Register, bool> &PtrReg) const {
Register Dst = MI.getOperand(0).getReg();
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
@@ -2519,7 +2528,7 @@ void CombinerHelper::applyCombineAddP2IToPtrAdd(
}
bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
- APInt &NewCst) {
+ APInt &NewCst) const {
auto &PtrAdd = cast<GPtrAdd>(MI);
Register LHS = PtrAdd.getBaseReg();
Register RHS = PtrAdd.getOffsetReg();
@@ -2540,7 +2549,7 @@ bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
}
void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
- APInt &NewCst) {
+ APInt &NewCst) const {
auto &PtrAdd = cast<GPtrAdd>(MI);
Register Dst = PtrAdd.getReg(0);
@@ -2548,7 +2557,8 @@ void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
PtrAdd.eraseFromParent();
}
-bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
+bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI,
+ Register &Reg) const {
assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
@@ -2560,7 +2570,8 @@ bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
}
-bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
+bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI,
+ Register &Reg) const {
assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
@@ -2592,7 +2603,7 @@ static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy) {
}
bool CombinerHelper::matchCombineTruncOfShift(
- MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
+ MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
@@ -2653,7 +2664,7 @@ bool CombinerHelper::matchCombineTruncOfShift(
}
void CombinerHelper::applyCombineTruncOfShift(
- MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
+ MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) const {
MachineInstr *ShiftMI = MatchInfo.first;
LLT NewShiftTy = MatchInfo.second;
@@ -2677,39 +2688,40 @@ void CombinerHelper::applyCombineTruncOfShift(
eraseInst(MI);
}
-bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
+bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) const {
return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
return MO.isReg() &&
getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
});
}
-bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
+bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) const {
return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
return !MO.isReg() ||
getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
});
}
-bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
+bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
return all_of(Mask, [](int Elt) { return Elt < 0; });
}
-bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
+bool CombinerHelper::matchUndefStore(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_STORE);
return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
MRI);
}
-bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
+bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SELECT);
return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
MRI);
}
-bool CombinerHelper::matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) {
+bool CombinerHelper::matchInsertExtractVecEltOutOfBounds(
+ MachineInstr &MI) const {
assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
"Expected an insert/extract element op");
@@ -2725,7 +2737,8 @@ bool CombinerHelper::matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) {
return Idx->getZExtValue() >= VecTy.getNumElements();
}
-bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
+bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI,
+ unsigned &OpIdx) const {
GSelect &SelMI = cast<GSelect>(MI);
auto Cst =
isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI);
@@ -2735,10 +2748,10 @@ bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
return true;
}
-void CombinerHelper::eraseInst(MachineInstr &MI) { MI.eraseFromParent(); }
+void CombinerHelper::eraseInst(MachineInstr &MI) const { MI.eraseFromParent(); }
bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
- const MachineOperand &MOP2) {
+ const MachineOperand &MOP2) const {
if (!MOP1.isReg() || !MOP2.isReg())
return false;
auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
@@ -2834,7 +2847,8 @@ bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
return false;
}
-bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
+bool CombinerHelper::matchConstantOp(const MachineOperand &MOP,
+ int64_t C) const {
if (!MOP.isReg())
return false;
auto *MI = MRI.getVRegDef(MOP.getReg());
@@ -2843,7 +2857,8 @@ bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
MaybeCst->getSExtValue() == C;
}
-bool CombinerHelper::matchConstantFPOp(const MachineOperand &MOP, double C) {
+bool CombinerHelper::matchConstantFPOp(const MachineOperand &MOP,
+ double C) const {
if (!MOP.isReg())
return false;
std::optional<FPValueAndVReg> MaybeCst;
@@ -2854,7 +2869,7 @@ bool CombinerHelper::matchConstantFPOp(const MachineOperand &MOP, double C) {
}
void CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
- unsigned OpIdx) {
+ unsigned OpIdx) const {
assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
Register OldReg = MI.getOperand(0).getReg();
Register Replacement = MI.getOperand(OpIdx).getReg();
@@ -2864,7 +2879,7 @@ void CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
}
void CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
- Register Replacement) {
+ Register Replacement) const {
assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
Register OldReg = MI.getOperand(0).getReg();
assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
@@ -2873,7 +2888,7 @@ void CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
}
bool CombinerHelper::matchConstantLargerBitWidth(MachineInstr &MI,
- unsigned ConstIdx) {
+ unsigned ConstIdx) const {
Register ConstReg = MI.getOperand(ConstIdx).getReg();
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
@@ -2886,7 +2901,7 @@ bool CombinerHelper::matchConstantLargerBitWidth(MachineInstr &MI,
return (VRegAndVal->Value.uge(DstTy.getSizeInBits()));
}
-void CombinerHelper::applyFunnelShiftConstantModulo(MachineInstr &MI) {
+void CombinerHelper::applyFunnelShiftConstantModulo(MachineInstr &MI) const {
assert((MI.getOpcode() == TargetOpcode::G_FSHL ||
MI.getOpcode() == TargetOpcode::G_FSHR) &&
"This is not a funnel shift operation");
@@ -2910,7 +2925,7 @@ void CombinerHelper::applyFunnelShiftConstantModulo(MachineInstr &MI) {
MI.eraseFromParent();
}
-bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
+bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SELECT);
// Match (cond ? x : x)
return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
@@ -2918,63 +2933,67 @@ bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
MRI);
}
-bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
+bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) const {
return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
MRI);
}
-bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
+bool CombinerHelper::matchOperandIsZero(MachineInstr &MI,
+ unsigned OpIdx) const {
return matchConstantOp(MI.getOperand(OpIdx), 0) &&
canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
MRI);
}
-bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
+bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI,
+ unsigned OpIdx) const {
MachineOperand &MO = MI.getOperand(OpIdx);
return MO.isReg() &&
getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
}
bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
- unsigned OpIdx) {
+ unsigned OpIdx) const {
MachineOperand &MO = MI.getOperand(OpIdx);
return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
}
-void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
+void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI,
+ double C) const {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
Builder.buildFConstant(MI.getOperand(0), C);
MI.eraseFromParent();
}
-void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
+void CombinerHelper::replaceInstWithConstant(MachineInstr &MI,
+ int64_t C) const {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
Builder.buildConstant(MI.getOperand(0), C);
MI.eraseFromParent();
}
-void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) {
+void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) const {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
Builder.buildConstant(MI.getOperand(0), C);
MI.eraseFromParent();
}
void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI,
- ConstantFP *CFP) {
+ ConstantFP *CFP) const {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
Builder.buildFConstant(MI.getOperand(0), CFP->getValueAPF());
MI.eraseFromParent();
}
-void CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
+void CombinerHelper::replaceInstWithUndef(MachineInstr &MI) const {
assert(MI.getNumDefs() == 1 && "Expected only one def?");
Builder.buildUndef(MI.getOperand(0));
MI.eraseFromParent();
}
bool CombinerHelper::matchSimplifyAddToSub(
- MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
+ MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) const {
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
Register &NewLHS = std::get<0>(MatchInfo);
@@ -2994,7 +3013,7 @@ bool CombinerHelper::matchSimplifyAddToSub(
}
bool CombinerHelper::matchCombineInsertVecElts(
- MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
+ MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
"Invalid opcode");
Register DstReg = MI.getOperand(0).getReg();
@@ -3041,7 +3060,7 @@ bool CombinerHelper::matchCombineInsertVecElts(
}
void CombinerHelper::applyCombineInsertVecElts(
- MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
+ MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) const {
Register UndefReg;
auto GetUndef = [&]() {
if (UndefReg)
@@ -3059,7 +3078,7 @@ void CombinerHelper::applyCombineInsertVecElts(
}
void CombinerHelper::applySimplifyAddToSub(
- MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
+ MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) const {
Register SubLHS, SubRHS;
std::tie(SubLHS, SubRHS) = MatchInfo;
Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
@@ -3067,7 +3086,7 @@ void CombinerHelper::applySimplifyAddToSub(
}
bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
- MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
+ MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const {
// Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
//
// Creates the new hand + logic instruction (but does not insert them.)
@@ -3175,7 +3194,7 @@ bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
}
void CombinerHelper::applyBuildInstructionSteps(
- MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
+ MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const {
assert(MatchInfo.InstrsToBuild.size() &&
"Expected at least one instr to build?");
for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
@@ -3189,7 +3208,7 @@ void CombinerHelper::applyBuildInstructionSteps(
}
bool CombinerHelper::matchAshrShlToSextInreg(
- MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
+ MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_ASHR);
int64_t ShlCst, AshrCst;
Register Src;
@@ -3207,7 +3226,7 @@ bool CombinerHelper::matchAshrShlToSextInreg(
}
void CombinerHelper::applyAshShlToSextInreg(
- MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
+ MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_ASHR);
Register Src;
int64_t ShiftAmt;
@@ -3219,7 +3238,8 @@ void CombinerHelper::applyAshShlToSextInreg(
/// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
bool CombinerHelper::matchOverlappingAnd(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_AND);
Register Dst = MI.getOperand(0).getReg();
@@ -3245,7 +3265,7 @@ bool CombinerHelper::matchOverlappingAnd(
}
bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
- Register &Replacement) {
+ Register &Replacement) const {
// Given
//
// %y:_(sN) = G_SOMETHING
@@ -3300,7 +3320,8 @@ bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
return false;
}
-bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
+bool CombinerHelper::matchRedundantOr(MachineInstr &MI,
+ Register &Replacement) const {
// Given
//
// %y:_(sN) = G_SOMETHING
@@ -3341,7 +3362,7 @@ bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
return false;
}
-bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
+bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) const {
// If the input is already sign extended, just drop the extension.
Register Src = MI.getOperand(1).getReg();
unsigned ExtBits = MI.getOperand(2).getImm();
@@ -3373,7 +3394,7 @@ static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
//
// Only matches sources made up of G_TRUNCs followed by G_IMPLICIT_DEFs
bool CombinerHelper::matchUseVectorTruncate(MachineInstr &MI,
- Register &MatchInfo) {
+ Register &MatchInfo) const {
auto BuildMI = cast<GBuildVector>(&MI);
unsigned NumOperands = BuildMI->getNumSources();
LLT DstTy = MRI.getType(BuildMI->getReg(0));
@@ -3436,7 +3457,7 @@ bool CombinerHelper::matchUseVectorTruncate(MachineInstr &MI,
}
void CombinerHelper::applyUseVectorTruncate(MachineInstr &MI,
- Register &MatchInfo) {
+ Register &MatchInfo) const {
Register MidReg;
auto BuildMI = cast<GBuildVector>(&MI);
Register DstReg = BuildMI->getReg(0);
@@ -3462,8 +3483,8 @@ void CombinerHelper::applyUseVectorTruncate(MachineInstr &MI,
MI.eraseFromParent();
}
-bool CombinerHelper::matchNotCmp(MachineInstr &MI,
- SmallVectorImpl<Register> &RegsToNegate) {
+bool CombinerHelper::matchNotCmp(
+ MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate) const {
assert(MI.getOpcode() == TargetOpcode::G_XOR);
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
@@ -3539,8 +3560,8 @@ bool CombinerHelper::matchNotCmp(MachineInstr &MI,
return true;
}
-void CombinerHelper::applyNotCmp(MachineInstr &MI,
- SmallVectorImpl<Register> &RegsToNegate) {
+void CombinerHelper::applyNotCmp(
+ MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate) const {
for (Register Reg : RegsToNegate) {
MachineInstr *Def = MRI.getVRegDef(Reg);
Observer.changingInstr(*Def);
@@ -3572,7 +3593,7 @@ void CombinerHelper::applyNotCmp(MachineInstr &MI,
}
bool CombinerHelper::matchXorOfAndWithSameReg(
- MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
+ MachineInstr &MI, std::pair<Register, Register> &MatchInfo) const {
// Match (xor (and x, y), y) (or any of its commuted cases)
assert(MI.getOpcode() == TargetOpcode::G_XOR);
Register &X = MatchInfo.first;
@@ -3603,7 +3624,7 @@ bool CombinerHelper::matchXorOfAndWithSameReg(
}
void CombinerHelper::applyXorOfAndWithSameReg(
- MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
+ MachineInstr &MI, std::pair<Register, Register> &MatchInfo) const {
// Fold (xor (and x, y), y) -> (and (not x), y)
Register X, Y;
std::tie(X, Y) = MatchInfo;
@@ -3615,7 +3636,7 @@ void CombinerHelper::applyXorOfAndWithSameReg(
Observer.changedInstr(MI);
}
-bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
+bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) const {
auto &PtrAdd = cast<GPtrAdd>(MI);
Register DstReg = PtrAdd.getReg(0);
LLT Ty = MRI.getType(DstReg);
@@ -3634,14 +3655,14 @@ bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
return isBuildVectorAllZeros(*VecMI, MRI);
}
-void CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
+void CombinerHelper::applyPtrAddZero(MachineInstr &MI) const {
auto &PtrAdd = cast<GPtrAdd>(MI);
Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
PtrAdd.eraseFromParent();
}
/// The second source operand is known to be a power of 2.
-void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
+void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) const {
Register DstReg = MI.getOperand(0).getReg();
Register Src0 = MI.getOperand(1).getReg();
Register Pow2Src1 = MI.getOperand(2).getReg();
@@ -3655,7 +3676,7 @@ void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
}
bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
- unsigned &SelectOpNo) {
+ unsigned &SelectOpNo) const {
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
@@ -3708,8 +3729,8 @@ bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
/// \p SelectOperand is the operand in binary operator \p MI that is the select
/// to fold.
-void CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
- const unsigned &SelectOperand) {
+void CombinerHelper::applyFoldBinOpIntoSelect(
+ MachineInstr &MI, const unsigned &SelectOperand) const {
Register Dst = MI.getOperand(0).getReg();
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
@@ -3845,7 +3866,8 @@ matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
CombinerHelper::findLoadOffsetsForLoadOrCombine(
SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
- const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
+ const SmallVector<Register, 8> &RegsToVisit,
+ const unsigned MemSizeInBits) const {
// Each load found for the pattern. There should be one for each RegsToVisit.
SmallSetVector<const MachineInstr *, 8> Loads;
@@ -3977,7 +3999,8 @@ CombinerHelper::findLoadOffsetsForLoadOrCombine(
}
bool CombinerHelper::matchLoadOrCombine(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_OR);
MachineFunction &MF = *MI.getMF();
// Assuming a little-endian target, transform:
@@ -4090,7 +4113,7 @@ bool CombinerHelper::matchLoadOrCombine(
}
bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
- MachineInstr *&ExtMI) {
+ MachineInstr *&ExtMI) const {
auto &PHI = cast<GPhi>(MI);
Register DstReg = PHI.getReg(0);
@@ -4144,7 +4167,7 @@ bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
}
void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
- MachineInstr *&ExtMI) {
+ MachineInstr *&ExtMI) const {
auto &PHI = cast<GPhi>(MI);
Register DstReg = ExtMI->getOperand(0).getReg();
LLT ExtTy = MRI.getType(DstReg);
@@ -4189,7 +4212,7 @@ void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
}
bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
- Register &Reg) {
+ Register &Reg) const {
assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
// If we have a constant index, look for a G_BUILD_VECTOR source
// and find the source register that the index maps to.
@@ -4225,7 +4248,7 @@ bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
}
void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
- Register &Reg) {
+ Register &Reg) const {
// Check the type of the register, since it may have come from a
// G_BUILD_VECTOR_TRUNC.
LLT ScalarTy = MRI.getType(Reg);
@@ -4243,7 +4266,7 @@ void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
bool CombinerHelper::matchExtractAllEltsFromBuildVector(
MachineInstr &MI,
- SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
+ SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) const {
assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
// This combine tries to find build_vector's which have every source element
// extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
@@ -4285,7 +4308,7 @@ bool CombinerHelper::matchExtractAllEltsFromBuildVector(
void CombinerHelper::applyExtractAllEltsFromBuildVector(
MachineInstr &MI,
- SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
+ SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) const {
assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
for (auto &Pair : SrcDstPairs) {
auto *ExtMI = Pair.second;
@@ -4296,18 +4319,20 @@ void CombinerHelper::applyExtractAllEltsFromBuildVector(
}
void CombinerHelper::applyBuildFn(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
applyBuildFnNoErase(MI, MatchInfo);
MI.eraseFromParent();
}
void CombinerHelper::applyBuildFnNoErase(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
MatchInfo(Builder);
}
bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_OR);
Register Dst = MI.getOperand(0).getReg();
@@ -4360,7 +4385,7 @@ bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
}
/// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
-bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
+bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) const {
unsigned Opc = MI.getOpcode();
assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
Register X = MI.getOperand(1).getReg();
@@ -4372,7 +4397,7 @@ bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
}
-void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
+void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) const {
unsigned Opc = MI.getOpcode();
assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
bool IsFSHL = Opc == TargetOpcode::G_FSHL;
@@ -4384,7 +4409,7 @@ void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
}
// Fold (rot x, c) -> (rot x, c % BitSize)
-bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
+bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
MI.getOpcode() == TargetOpcode::G_ROTR);
unsigned Bitsize =
@@ -4399,7 +4424,7 @@ bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
}
-void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
+void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
MI.getOpcode() == TargetOpcode::G_ROTR);
unsigned Bitsize =
@@ -4414,7 +4439,7 @@ void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
}
bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
- int64_t &MatchInfo) {
+ int64_t &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_ICMP);
auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
@@ -4458,7 +4483,8 @@ bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
}
bool CombinerHelper::matchICmpToLHSKnownBits(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_ICMP);
// Given:
//
@@ -4501,7 +4527,8 @@ bool CombinerHelper::matchICmpToLHSKnownBits(
// Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0
bool CombinerHelper::matchAndOrDisjointMask(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_AND);
// Ignore vector types to simplify matching the two constants.
@@ -4536,7 +4563,8 @@ bool CombinerHelper::matchAndOrDisjointMask(
/// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
Register Dst = MI.getOperand(0).getReg();
Register Src = MI.getOperand(1).getReg();
@@ -4565,7 +4593,7 @@ bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
/// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
bool CombinerHelper::matchBitfieldExtractFromAnd(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GAnd *And = cast<GAnd>(&MI);
Register Dst = And->getReg(0);
LLT Ty = MRI.getType(Dst);
@@ -4602,7 +4630,8 @@ bool CombinerHelper::matchBitfieldExtractFromAnd(MachineInstr &MI,
}
bool CombinerHelper::matchBitfieldExtractFromShr(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
const unsigned Opcode = MI.getOpcode();
assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);
@@ -4651,7 +4680,8 @@ bool CombinerHelper::matchBitfieldExtractFromShr(
}
bool CombinerHelper::matchBitfieldExtractFromShrAnd(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
const unsigned Opcode = MI.getOpcode();
assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);
@@ -4708,7 +4738,7 @@ bool CombinerHelper::matchBitfieldExtractFromShrAnd(
}
bool CombinerHelper::reassociationCanBreakAddressingModePattern(
- MachineInstr &MI) {
+ MachineInstr &MI) const {
auto &PtrAdd = cast<GPtrAdd>(MI);
Register Src1Reg = PtrAdd.getBaseReg();
@@ -4774,7 +4804,7 @@ bool CombinerHelper::reassociationCanBreakAddressingModePattern(
bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI,
MachineInstr *RHS,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
Register Src1Reg = MI.getOperand(1).getReg();
if (RHS->getOpcode() != TargetOpcode::G_ADD)
@@ -4799,7 +4829,7 @@ bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI,
bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
MachineInstr *LHS,
MachineInstr *RHS,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD(X, Y), C)
// if and only if (G_PTR_ADD X, C) has one use.
Register LHSBase;
@@ -4827,10 +4857,9 @@ bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
return !reassociationCanBreakAddressingModePattern(MI);
}
-bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI,
- MachineInstr *LHS,
- MachineInstr *RHS,
- BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchReassocFoldConstantsInSubTree(
+ GPtrAdd &MI, MachineInstr *LHS, MachineInstr *RHS,
+ BuildFnTy &MatchInfo) const {
// G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS);
if (!LHSPtrAdd)
@@ -4857,7 +4886,7 @@ bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI,
}
bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
auto &PtrAdd = cast<GPtrAdd>(MI);
// We're trying to match a few pointer computation patterns here for
// re-association opportunities.
@@ -4890,7 +4919,7 @@ bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI,
}
bool CombinerHelper::tryReassocBinOp(unsigned Opc, Register DstReg,
Register OpLHS, Register OpRHS,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
LLT OpRHSTy = MRI.getType(OpRHS);
MachineInstr *OpLHSDef = MRI.getVRegDef(OpLHS);
@@ -4930,7 +4959,7 @@ bool CombinerHelper::tryReassocBinOp(unsigned Opc, Register DstReg,
}
bool CombinerHelper::matchReassocCommBinOp(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// We don't check if the reassociation will break a legal addressing mode
// here since pointer arithmetic is handled by G_PTR_ADD.
unsigned Opc = MI.getOpcode();
@@ -4945,7 +4974,8 @@ bool CombinerHelper::matchReassocCommBinOp(MachineInstr &MI,
return false;
}
-bool CombinerHelper::matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) {
+bool CombinerHelper::matchConstantFoldCastOp(MachineInstr &MI,
+ APInt &MatchInfo) const {
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
Register SrcOp = MI.getOperand(1).getReg();
@@ -4957,7 +4987,8 @@ bool CombinerHelper::matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo)
return false;
}
-bool CombinerHelper::matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) {
+bool CombinerHelper::matchConstantFoldBinOp(MachineInstr &MI,
+ APInt &MatchInfo) const {
Register Op1 = MI.getOperand(1).getReg();
Register Op2 = MI.getOperand(2).getReg();
auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI);
@@ -4967,7 +4998,8 @@ bool CombinerHelper::matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo)
return true;
}
-bool CombinerHelper::matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP* &MatchInfo) {
+bool CombinerHelper::matchConstantFoldFPBinOp(MachineInstr &MI,
+ ConstantFP *&MatchInfo) const {
Register Op1 = MI.getOperand(1).getReg();
Register Op2 = MI.getOperand(2).getReg();
auto MaybeCst = ConstantFoldFPBinOp(MI.getOpcode(), Op1, Op2, MRI);
@@ -4979,7 +5011,7 @@ bool CombinerHelper::matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP* &Mat
}
bool CombinerHelper::matchConstantFoldFMA(MachineInstr &MI,
- ConstantFP *&MatchInfo) {
+ ConstantFP *&MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FMA ||
MI.getOpcode() == TargetOpcode::G_FMAD);
auto [_, Op1, Op2, Op3] = MI.getFirst4Regs();
@@ -5004,7 +5036,8 @@ bool CombinerHelper::matchConstantFoldFMA(MachineInstr &MI,
}
bool CombinerHelper::matchNarrowBinopFeedingAnd(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
// Look for a binop feeding into an AND with a mask:
//
// %add = G_ADD %lhs, %rhs
@@ -5094,7 +5127,8 @@ bool CombinerHelper::matchNarrowBinopFeedingAnd(
return true;
}
-bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchMulOBy2(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const {
unsigned Opc = MI.getOpcode();
assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO);
@@ -5112,7 +5146,8 @@ bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) {
return true;
}
-bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchMulOBy0(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const {
// (G_*MULO x, 0) -> 0 + no carry out
assert(MI.getOpcode() == TargetOpcode::G_UMULO ||
MI.getOpcode() == TargetOpcode::G_SMULO);
@@ -5130,7 +5165,8 @@ bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
return true;
}
-bool CombinerHelper::matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchAddEToAddO(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const {
// (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
// (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
assert(MI.getOpcode() == TargetOpcode::G_UADDE ||
@@ -5164,7 +5200,7 @@ bool CombinerHelper::matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) {
}
bool CombinerHelper::matchSubAddSameReg(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_SUB);
Register Dst = MI.getOperand(0).getReg();
// (x + y) - z -> x (if y == z)
@@ -5207,7 +5243,7 @@ bool CombinerHelper::matchSubAddSameReg(MachineInstr &MI,
return false;
}
-MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
+MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_UDIV);
auto &UDiv = cast<GenericMachineInstr>(MI);
Register Dst = UDiv.getReg(0);
@@ -5368,7 +5404,7 @@ MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) {
return MIB.buildSelect(Ty, IsOne, LHS, Q);
}
-bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
+bool CombinerHelper::matchUDivByConst(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_UDIV);
Register Dst = MI.getOperand(0).getReg();
Register RHS = MI.getOperand(2).getReg();
@@ -5413,12 +5449,12 @@ bool CombinerHelper::matchUDivByConst(MachineInstr &MI) {
MRI, RHS, [](const Constant *C) { return C && !C->isNullValue(); });
}
-void CombinerHelper::applyUDivByConst(MachineInstr &MI) {
+void CombinerHelper::applyUDivByConst(MachineInstr &MI) const {
auto *NewMI = buildUDivUsingMul(MI);
replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
}
-bool CombinerHelper::matchSDivByConst(MachineInstr &MI) {
+bool CombinerHelper::matchSDivByConst(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
Register Dst = MI.getOperand(0).getReg();
Register RHS = MI.getOperand(2).getReg();
@@ -5447,12 +5483,12 @@ bool CombinerHelper::matchSDivByConst(MachineInstr &MI) {
return false;
}
-void CombinerHelper::applySDivByConst(MachineInstr &MI) {
+void CombinerHelper::applySDivByConst(MachineInstr &MI) const {
auto *NewMI = buildSDivUsingMul(MI);
replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
}
-MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
+MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
auto &SDiv = cast<GenericMachineInstr>(MI);
Register Dst = SDiv.getReg(0);
@@ -5516,7 +5552,7 @@ MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) {
return MIB.buildMul(Ty, Res, Factor);
}
-bool CombinerHelper::matchDivByPow2(MachineInstr &MI, bool IsSigned) {
+bool CombinerHelper::matchDivByPow2(MachineInstr &MI, bool IsSigned) const {
assert((MI.getOpcode() == TargetOpcode::G_SDIV ||
MI.getOpcode() == TargetOpcode::G_UDIV) &&
"Expected SDIV or UDIV");
@@ -5530,7 +5566,7 @@ bool CombinerHelper::matchDivByPow2(MachineInstr &MI, bool IsSigned) {
return matchUnaryPredicate(MRI, RHS, MatchPow2, /*AllowUndefs=*/false);
}
-void CombinerHelper::applySDivByPow2(MachineInstr &MI) {
+void CombinerHelper::applySDivByPow2(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV");
auto &SDiv = cast<GenericMachineInstr>(MI);
Register Dst = SDiv.getReg(0);
@@ -5589,7 +5625,7 @@ void CombinerHelper::applySDivByPow2(MachineInstr &MI) {
MI.eraseFromParent();
}
-void CombinerHelper::applyUDivByPow2(MachineInstr &MI) {
+void CombinerHelper::applyUDivByPow2(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_UDIV && "Expected UDIV");
auto &UDiv = cast<GenericMachineInstr>(MI);
Register Dst = UDiv.getReg(0);
@@ -5603,7 +5639,7 @@ void CombinerHelper::applyUDivByPow2(MachineInstr &MI) {
MI.eraseFromParent();
}
-bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
+bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) const {
assert(MI.getOpcode() == TargetOpcode::G_UMULH);
Register RHS = MI.getOperand(2).getReg();
Register Dst = MI.getOperand(0).getReg();
@@ -5619,7 +5655,7 @@ bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) {
return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR, {Ty, ShiftAmtTy}});
}
-void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) {
+void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) const {
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
Register Dst = MI.getOperand(0).getReg();
@@ -5636,7 +5672,7 @@ void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) {
}
bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
unsigned Opc = MI.getOpcode();
assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
@@ -5681,7 +5717,8 @@ bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
return true;
}
-bool CombinerHelper::matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
+bool CombinerHelper::matchFsubToFneg(MachineInstr &MI,
+ Register &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FSUB);
Register LHS = MI.getOperand(1).getReg();
@@ -5705,7 +5742,8 @@ bool CombinerHelper::matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
return false;
}
-void CombinerHelper::applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
+void CombinerHelper::applyFsubToFneg(MachineInstr &MI,
+ Register &MatchInfo) const {
Register Dst = MI.getOperand(0).getReg();
Builder.buildFNeg(
Dst, Builder.buildFCanonicalize(MRI.getType(Dst), MatchInfo).getReg(0));
@@ -5731,7 +5769,7 @@ static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1,
bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI,
bool &AllowFusionGlobally,
bool &HasFMAD, bool &Aggressive,
- bool CanReassociate) {
+ bool CanReassociate) const {
auto *MF = MI.getMF();
const auto &TLI = *MF->getSubtarget().getTargetLowering();
@@ -5762,7 +5800,8 @@ bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI,
}
bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FADD);
bool AllowFusionGlobally, HasFMAD, Aggressive;
@@ -5810,7 +5849,8 @@ bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
}
bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FADD);
bool AllowFusionGlobally, HasFMAD, Aggressive;
@@ -5869,7 +5909,8 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
}
bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FADD);
bool AllowFusionGlobally, HasFMAD, Aggressive;
@@ -5934,7 +5975,8 @@ bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
}
bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FADD);
bool AllowFusionGlobally, HasFMAD, Aggressive;
@@ -6060,7 +6102,8 @@ bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
}
bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FSUB);
bool AllowFusionGlobally, HasFMAD, Aggressive;
@@ -6112,7 +6155,8 @@ bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
}
bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FSUB);
bool AllowFusionGlobally, HasFMAD, Aggressive;
@@ -6159,7 +6203,8 @@ bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
}
bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FSUB);
bool AllowFusionGlobally, HasFMAD, Aggressive;
@@ -6210,7 +6255,8 @@ bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
}
bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
- MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
+ MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_FSUB);
bool AllowFusionGlobally, HasFMAD, Aggressive;
@@ -6269,7 +6315,7 @@ bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
}
bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI,
- unsigned &IdxToPropagate) {
+ unsigned &IdxToPropagate) const {
bool PropagateNaN;
switch (MI.getOpcode()) {
default:
@@ -6296,7 +6342,7 @@ bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI,
return MatchNaN(1) || MatchNaN(2);
}
-bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) {
+bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) const {
assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD");
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
@@ -6313,7 +6359,7 @@ bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) {
}
bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI,
- Register &MatchInfo) {
+ Register &MatchInfo) const {
// This combine folds the following patterns:
//
// G_BUILD_VECTOR_TRUNC (G_BITCAST(x), G_LSHR(G_BITCAST(x), k))
@@ -6359,7 +6405,7 @@ bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI,
}
bool CombinerHelper::matchTruncBuildVectorFold(MachineInstr &MI,
- Register &MatchInfo) {
+ Register &MatchInfo) const {
// Replace (G_TRUNC (G_BITCAST (G_BUILD_VECTOR x, y)) with just x
// if type(x) == type(G_TRUNC)
if (!mi_match(MI.getOperand(1).getReg(), MRI,
@@ -6370,7 +6416,7 @@ bool CombinerHelper::matchTruncBuildVectorFold(MachineInstr &MI,
}
bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI,
- Register &MatchInfo) {
+ Register &MatchInfo) const {
// Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with
// y if K == size of vector element type
std::optional<ValueAndVReg> ShiftAmt;
@@ -6446,7 +6492,7 @@ CombinerHelper::computeRetValAgainstNaN(Register LHS, Register RHS,
bool CombinerHelper::matchFPSelectToMinMax(Register Dst, Register Cond,
Register TrueVal, Register FalseVal,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// Match: select (fcmp cond x, y) x, y
// select (fcmp cond x, y) y, x
// And turn it into fminnum/fmaxnum or fmin/fmax based off of the condition.
@@ -6501,7 +6547,7 @@ bool CombinerHelper::matchFPSelectToMinMax(Register Dst, Register Cond,
}
bool CombinerHelper::matchSimplifySelectToMinMax(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// TODO: Handle integer cases.
assert(MI.getOpcode() == TargetOpcode::G_SELECT);
// Condition may be fed by a truncated compare.
@@ -6516,7 +6562,7 @@ bool CombinerHelper::matchSimplifySelectToMinMax(MachineInstr &MI,
}
bool CombinerHelper::matchRedundantBinOpInEquality(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
assert(MI.getOpcode() == TargetOpcode::G_ICMP);
// (X + Y) == X --> Y == 0
// (X + Y) != X --> Y != 0
@@ -6547,7 +6593,7 @@ bool CombinerHelper::matchRedundantBinOpInEquality(MachineInstr &MI,
return CmpInst::isEquality(Pred) && Y.isValid();
}
-bool CombinerHelper::matchShiftsTooBig(MachineInstr &MI) {
+bool CombinerHelper::matchShiftsTooBig(MachineInstr &MI) const {
Register ShiftReg = MI.getOperand(2).getReg();
LLT ResTy = MRI.getType(MI.getOperand(0).getReg());
auto IsShiftTooBig = [&](const Constant *C) {
@@ -6557,7 +6603,7 @@ bool CombinerHelper::matchShiftsTooBig(MachineInstr &MI) {
return matchUnaryPredicate(MRI, ShiftReg, IsShiftTooBig);
}
-bool CombinerHelper::matchCommuteConstantToRHS(MachineInstr &MI) {
+bool CombinerHelper::matchCommuteConstantToRHS(MachineInstr &MI) const {
unsigned LHSOpndIdx = 1;
unsigned RHSOpndIdx = 2;
switch (MI.getOpcode()) {
@@ -6587,7 +6633,7 @@ bool CombinerHelper::matchCommuteConstantToRHS(MachineInstr &MI) {
!getIConstantVRegVal(RHS, MRI);
}
-bool CombinerHelper::matchCommuteFPConstantToRHS(MachineInstr &MI) {
+bool CombinerHelper::matchCommuteFPConstantToRHS(MachineInstr &MI) const {
Register LHS = MI.getOperand(1).getReg();
Register RHS = MI.getOperand(2).getReg();
std::optional<FPValueAndVReg> ValAndVReg;
@@ -6596,7 +6642,7 @@ bool CombinerHelper::matchCommuteFPConstantToRHS(MachineInstr &MI) {
return !mi_match(RHS, MRI, m_GFCstOrSplat(ValAndVReg));
}
-void CombinerHelper::applyCommuteBinOpOperands(MachineInstr &MI) {
+void CombinerHelper::applyCommuteBinOpOperands(MachineInstr &MI) const {
Observer.changingInstr(MI);
unsigned LHSOpndIdx = 1;
unsigned RHSOpndIdx = 2;
@@ -6618,7 +6664,7 @@ void CombinerHelper::applyCommuteBinOpOperands(MachineInstr &MI) {
Observer.changedInstr(MI);
}
-bool CombinerHelper::isOneOrOneSplat(Register Src, bool AllowUndefs) {
+bool CombinerHelper::isOneOrOneSplat(Register Src, bool AllowUndefs) const {
LLT SrcTy = MRI.getType(Src);
if (SrcTy.isFixedVector())
return isConstantSplatVector(Src, 1, AllowUndefs);
@@ -6631,7 +6677,7 @@ bool CombinerHelper::isOneOrOneSplat(Register Src, bool AllowUndefs) {
return false; // scalable vector
}
-bool CombinerHelper::isZeroOrZeroSplat(Register Src, bool AllowUndefs) {
+bool CombinerHelper::isZeroOrZeroSplat(Register Src, bool AllowUndefs) const {
LLT SrcTy = MRI.getType(Src);
if (SrcTy.isFixedVector())
return isConstantSplatVector(Src, 0, AllowUndefs);
@@ -6647,7 +6693,7 @@ bool CombinerHelper::isZeroOrZeroSplat(Register Src, bool AllowUndefs) {
// Ignores COPYs during conformance checks.
// FIXME scalable vectors.
bool CombinerHelper::isConstantSplatVector(Register Src, int64_t SplatValue,
- bool AllowUndefs) {
+ bool AllowUndefs) const {
GBuildVector *BuildVector = getOpcodeDef<GBuildVector>(Src, MRI);
if (!BuildVector)
return false;
@@ -6672,7 +6718,7 @@ bool CombinerHelper::isConstantSplatVector(Register Src, int64_t SplatValue,
// Ignores COPYs during lookups.
// FIXME scalable vectors
std::optional<APInt>
-CombinerHelper::getConstantOrConstantSplatVector(Register Src) {
+CombinerHelper::getConstantOrConstantSplatVector(Register Src) const {
auto IConstant = getIConstantVRegValWithLookThrough(Src, MRI);
if (IConstant)
return IConstant->Value;
@@ -6718,7 +6764,7 @@ bool CombinerHelper::isConstantOrConstantVectorI(Register Src) const {
// TODO: use knownbits to determine zeros
bool CombinerHelper::tryFoldSelectOfConstants(GSelect *Select,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
uint32_t Flags = Select->getFlags();
Register Dest = Select->getReg(0);
Register Cond = Select->getCondReg();
@@ -6850,7 +6896,7 @@ bool CombinerHelper::tryFoldSelectOfConstants(GSelect *Select,
// TODO: use knownbits to determine zeros
bool CombinerHelper::tryFoldBoolSelectToLogic(GSelect *Select,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
uint32_t Flags = Select->getFlags();
Register DstReg = Select->getReg(0);
Register Cond = Select->getCondReg();
@@ -6931,7 +6977,7 @@ bool CombinerHelper::tryFoldBoolSelectToLogic(GSelect *Select,
}
bool CombinerHelper::matchSelectIMinMax(const MachineOperand &MO,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GSelect *Select = cast<GSelect>(MRI.getVRegDef(MO.getReg()));
GICmp *Cmp = cast<GICmp>(MRI.getVRegDef(Select->getCondReg()));
@@ -7002,7 +7048,7 @@ bool CombinerHelper::matchSelectIMinMax(const MachineOperand &MO,
}
}
-bool CombinerHelper::matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) const {
GSelect *Select = cast<GSelect>(&MI);
if (tryFoldSelectOfConstants(Select, MatchInfo))
@@ -7018,8 +7064,8 @@ bool CombinerHelper::matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) {
/// or (icmp Pred1 V1, C1) || (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// see InstCombinerImpl::foldAndOrOfICmpsUsingRanges.
-bool CombinerHelper::tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
- BuildFnTy &MatchInfo) {
+bool CombinerHelper::tryFoldAndOrOrICmpsUsingRanges(
+ GLogicalBinOp *Logic, BuildFnTy &MatchInfo) const {
assert(Logic->getOpcode() != TargetOpcode::G_XOR && "unexpected xor");
bool IsAnd = Logic->getOpcode() == TargetOpcode::G_AND;
Register DstReg = Logic->getReg(0);
@@ -7178,7 +7224,7 @@ bool CombinerHelper::tryFoldAndOrOrICmpsUsingRanges(GLogicalBinOp *Logic,
}
bool CombinerHelper::tryFoldLogicOfFCmps(GLogicalBinOp *Logic,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
assert(Logic->getOpcode() != TargetOpcode::G_XOR && "unexpecte xor");
Register DestReg = Logic->getReg(0);
Register LHS = Logic->getLHSReg();
@@ -7252,7 +7298,7 @@ bool CombinerHelper::tryFoldLogicOfFCmps(GLogicalBinOp *Logic,
return false;
}
-bool CombinerHelper::matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) const {
GAnd *And = cast<GAnd>(&MI);
if (tryFoldAndOrOrICmpsUsingRanges(And, MatchInfo))
@@ -7264,7 +7310,7 @@ bool CombinerHelper::matchAnd(MachineInstr &MI, BuildFnTy &MatchInfo) {
return false;
}
-bool CombinerHelper::matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) const {
GOr *Or = cast<GOr>(&MI);
if (tryFoldAndOrOrICmpsUsingRanges(Or, MatchInfo))
@@ -7276,7 +7322,8 @@ bool CombinerHelper::matchOr(MachineInstr &MI, BuildFnTy &MatchInfo) {
return false;
}
-bool CombinerHelper::matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchAddOverflow(MachineInstr &MI,
+ BuildFnTy &MatchInfo) const {
GAddCarryOut *Add = cast<GAddCarryOut>(&MI);
// Addo has no flags
@@ -7444,18 +7491,20 @@ bool CombinerHelper::matchAddOverflow(MachineInstr &MI, BuildFnTy &MatchInfo) {
}
void CombinerHelper::applyBuildFnMO(const MachineOperand &MO,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
MachineInstr *Root = getDefIgnoringCopies(MO.getReg(), MRI);
MatchInfo(Builder);
Root->eraseFromParent();
}
-bool CombinerHelper::matchFPowIExpansion(MachineInstr &MI, int64_t Exponent) {
+bool CombinerHelper::matchFPowIExpansion(MachineInstr &MI,
+ int64_t Exponent) const {
bool OptForSize = MI.getMF()->getFunction().hasOptSize();
return getTargetLowering().isBeneficialToExpandPowI(Exponent, OptForSize);
}
-void CombinerHelper::applyExpandFPowI(MachineInstr &MI, int64_t Exponent) {
+void CombinerHelper::applyExpandFPowI(MachineInstr &MI,
+ int64_t Exponent) const {
auto [Dst, Base] = MI.getFirst2Regs();
LLT Ty = MRI.getType(Dst);
int64_t ExpVal = Exponent;
@@ -7499,7 +7548,7 @@ void CombinerHelper::applyExpandFPowI(MachineInstr &MI, int64_t Exponent) {
}
bool CombinerHelper::matchFoldAPlusC1MinusC2(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// fold (A+C1)-C2 -> A+(C1-C2)
const GSub *Sub = cast<GSub>(&MI);
GAdd *Add = cast<GAdd>(MRI.getVRegDef(Sub->getLHSReg()));
@@ -7522,7 +7571,7 @@ bool CombinerHelper::matchFoldAPlusC1MinusC2(const MachineInstr &MI,
}
bool CombinerHelper::matchFoldC2MinusAPlusC1(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// fold C2-(A+C1) -> (C2-C1)-A
const GSub *Sub = cast<GSub>(&MI);
GAdd *Add = cast<GAdd>(MRI.getVRegDef(Sub->getRHSReg()));
@@ -7545,7 +7594,7 @@ bool CombinerHelper::matchFoldC2MinusAPlusC1(const MachineInstr &MI,
}
bool CombinerHelper::matchFoldAMinusC1MinusC2(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// fold (A-C1)-C2 -> A-(C1+C2)
const GSub *Sub1 = cast<GSub>(&MI);
GSub *Sub2 = cast<GSub>(MRI.getVRegDef(Sub1->getLHSReg()));
@@ -7568,7 +7617,7 @@ bool CombinerHelper::matchFoldAMinusC1MinusC2(const MachineInstr &MI,
}
bool CombinerHelper::matchFoldC1Minus2MinusC2(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// fold (C1-A)-C2 -> (C1-C2)-A
const GSub *Sub1 = cast<GSub>(&MI);
GSub *Sub2 = cast<GSub>(MRI.getVRegDef(Sub1->getLHSReg()));
@@ -7591,7 +7640,7 @@ bool CombinerHelper::matchFoldC1Minus2MinusC2(const MachineInstr &MI,
}
bool CombinerHelper::matchFoldAMinusC1PlusC2(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
// fold ((A-C1)+C2) -> (A+(C2-C1))
const GAdd *Add = cast<GAdd>(&MI);
GSub *Sub = cast<GSub>(MRI.getVRegDef(Add->getLHSReg()));
@@ -7613,8 +7662,8 @@ bool CombinerHelper::matchFoldAMinusC1PlusC2(const MachineInstr &MI,
return true;
}
-bool CombinerHelper::matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+bool CombinerHelper::matchUnmergeValuesAnyExtBuildVector(
+ const MachineInstr &MI, BuildFnTy &MatchInfo) const {
const GUnmerge *Unmerge = cast<GUnmerge>(&MI);
if (!MRI.hasOneNonDBGUse(Unmerge->getSourceReg()))
@@ -7696,7 +7745,7 @@ bool CombinerHelper::matchUnmergeValuesAnyExtBuildVector(const MachineInstr &MI,
}
bool CombinerHelper::matchShuffleUndefRHS(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
bool Changed = false;
auto &Shuffle = cast<GShuffleVector>(MI);
@@ -7740,7 +7789,7 @@ static void commuteMask(MutableArrayRef<int> Mask, const unsigned NumElems) {
}
bool CombinerHelper::matchShuffleDisjointMask(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
auto &Shuffle = cast<GShuffleVector>(MI);
// If any of the two inputs is already undef, don't check the mask again to
@@ -7792,7 +7841,7 @@ bool CombinerHelper::matchShuffleDisjointMask(MachineInstr &MI,
}
bool CombinerHelper::matchSuboCarryOut(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GSubCarryOut *Subo = cast<GSubCarryOut>(&MI);
Register Dst = Subo->getReg(0);
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperArtifacts.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperArtifacts.cpp
index 797a1e84e21e35..24d2d9ddaeebd2 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelperArtifacts.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperArtifacts.cpp
@@ -29,7 +29,7 @@
using namespace llvm;
bool CombinerHelper::matchMergeXAndUndef(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GMerge *Merge = cast<GMerge>(&MI);
Register Dst = Merge->getReg(0);
@@ -58,7 +58,7 @@ bool CombinerHelper::matchMergeXAndUndef(const MachineInstr &MI,
}
bool CombinerHelper::matchMergeXAndZero(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GMerge *Merge = cast<GMerge>(&MI);
Register Dst = Merge->getReg(0);
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperCasts.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperCasts.cpp
index 30557e6a2304e6..08129de1ff2c4a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelperCasts.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperCasts.cpp
@@ -26,7 +26,7 @@
using namespace llvm;
bool CombinerHelper::matchSextOfTrunc(const MachineOperand &MO,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GSext *Sext = cast<GSext>(getDefIgnoringCopies(MO.getReg(), MRI));
GTrunc *Trunc = cast<GTrunc>(getDefIgnoringCopies(Sext->getSrcReg(), MRI));
@@ -59,7 +59,7 @@ bool CombinerHelper::matchSextOfTrunc(const MachineOperand &MO,
}
bool CombinerHelper::matchZextOfTrunc(const MachineOperand &MO,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GZext *Zext = cast<GZext>(getDefIgnoringCopies(MO.getReg(), MRI));
GTrunc *Trunc = cast<GTrunc>(getDefIgnoringCopies(Zext->getSrcReg(), MRI));
@@ -94,7 +94,7 @@ bool CombinerHelper::matchZextOfTrunc(const MachineOperand &MO,
}
bool CombinerHelper::matchNonNegZext(const MachineOperand &MO,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GZext *Zext = cast<GZext>(MRI.getVRegDef(MO.getReg()));
Register Dst = Zext->getReg(0);
@@ -116,7 +116,7 @@ bool CombinerHelper::matchNonNegZext(const MachineOperand &MO,
bool CombinerHelper::matchTruncateOfExt(const MachineInstr &Root,
const MachineInstr &ExtMI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GTrunc *Trunc = cast<GTrunc>(&Root);
const GExtOp *Ext = cast<GExtOp>(&ExtMI);
@@ -180,7 +180,7 @@ bool CombinerHelper::isCastFree(unsigned Opcode, LLT ToTy, LLT FromTy) const {
bool CombinerHelper::matchCastOfSelect(const MachineInstr &CastMI,
const MachineInstr &SelectMI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GExtOrTruncOp *Cast = cast<GExtOrTruncOp>(&CastMI);
const GSelect *Select = cast<GSelect>(&SelectMI);
@@ -212,7 +212,7 @@ bool CombinerHelper::matchCastOfSelect(const MachineInstr &CastMI,
bool CombinerHelper::matchExtOfExt(const MachineInstr &FirstMI,
const MachineInstr &SecondMI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GExtOp *First = cast<GExtOp>(&FirstMI);
const GExtOp *Second = cast<GExtOp>(&SecondMI);
@@ -276,7 +276,7 @@ bool CombinerHelper::matchExtOfExt(const MachineInstr &FirstMI,
bool CombinerHelper::matchCastOfBuildVector(const MachineInstr &CastMI,
const MachineInstr &BVMI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GExtOrTruncOp *Cast = cast<GExtOrTruncOp>(&CastMI);
const GBuildVector *BV = cast<GBuildVector>(&BVMI);
@@ -316,7 +316,7 @@ bool CombinerHelper::matchCastOfBuildVector(const MachineInstr &CastMI,
bool CombinerHelper::matchNarrowBinop(const MachineInstr &TruncMI,
const MachineInstr &BinopMI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GTrunc *Trunc = cast<GTrunc>(&TruncMI);
const GBinOp *BinOp = cast<GBinOp>(&BinopMI);
@@ -340,7 +340,7 @@ bool CombinerHelper::matchNarrowBinop(const MachineInstr &TruncMI,
}
bool CombinerHelper::matchCastOfInteger(const MachineInstr &CastMI,
- APInt &MatchInfo) {
+ APInt &MatchInfo) const {
const GExtOrTruncOp *Cast = cast<GExtOrTruncOp>(&CastMI);
APInt Input = getIConstantFromReg(Cast->getSrcReg(), MRI);
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperCompares.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperCompares.cpp
index 872b5fed11c6e6..fc40533cf3dc95 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelperCompares.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperCompares.cpp
@@ -29,7 +29,7 @@ using namespace llvm;
bool CombinerHelper::constantFoldICmp(const GICmp &ICmp,
const GIConstant &LHSCst,
const GIConstant &RHSCst,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
if (LHSCst.getKind() != GIConstant::GIConstantKind::Scalar)
return false;
@@ -60,7 +60,7 @@ bool CombinerHelper::constantFoldICmp(const GICmp &ICmp,
bool CombinerHelper::constantFoldFCmp(const GFCmp &FCmp,
const GFConstant &LHSCst,
const GFConstant &RHSCst,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
if (LHSCst.getKind() != GFConstant::GFConstantKind::Scalar)
return false;
@@ -89,7 +89,7 @@ bool CombinerHelper::constantFoldFCmp(const GFCmp &FCmp,
}
bool CombinerHelper::matchCanonicalizeICmp(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GICmp *Cmp = cast<GICmp>(&MI);
Register Dst = Cmp->getReg(0);
@@ -114,7 +114,7 @@ bool CombinerHelper::matchCanonicalizeICmp(const MachineInstr &MI,
}
bool CombinerHelper::matchCanonicalizeFCmp(const MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
const GFCmp *Cmp = cast<GFCmp>(&MI);
Register Dst = Cmp->getReg(0);
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
index 84fb3b59658956..229076bbdeaa50 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelperVectorOps.cpp
@@ -31,7 +31,7 @@ using namespace llvm;
using namespace MIPatternMatch;
bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GExtractVectorElement *Extract = cast<GExtractVectorElement>(&MI);
Register Dst = Extract->getReg(0);
@@ -89,7 +89,7 @@ bool CombinerHelper::matchExtractVectorElement(MachineInstr &MI,
}
bool CombinerHelper::matchExtractVectorElementWithDifferentIndices(
- const MachineOperand &MO, BuildFnTy &MatchInfo) {
+ const MachineOperand &MO, BuildFnTy &MatchInfo) const {
MachineInstr *Root = getDefIgnoringCopies(MO.getReg(), MRI);
GExtractVectorElement *Extract = cast<GExtractVectorElement>(Root);
@@ -146,7 +146,8 @@ bool CombinerHelper::matchExtractVectorElementWithDifferentIndices(
}
bool CombinerHelper::matchExtractVectorElementWithBuildVector(
- const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) {
+ const MachineInstr &MI, const MachineInstr &MI2,
+ BuildFnTy &MatchInfo) const {
const GExtractVectorElement *Extract = cast<GExtractVectorElement>(&MI);
const GBuildVector *Build = cast<GBuildVector>(&MI2);
@@ -185,7 +186,7 @@ bool CombinerHelper::matchExtractVectorElementWithBuildVector(
}
bool CombinerHelper::matchExtractVectorElementWithBuildVectorTrunc(
- const MachineOperand &MO, BuildFnTy &MatchInfo) {
+ const MachineOperand &MO, BuildFnTy &MatchInfo) const {
MachineInstr *Root = getDefIgnoringCopies(MO.getReg(), MRI);
GExtractVectorElement *Extract = cast<GExtractVectorElement>(Root);
@@ -252,7 +253,8 @@ bool CombinerHelper::matchExtractVectorElementWithBuildVectorTrunc(
}
bool CombinerHelper::matchExtractVectorElementWithShuffleVector(
- const MachineInstr &MI, const MachineInstr &MI2, BuildFnTy &MatchInfo) {
+ const MachineInstr &MI, const MachineInstr &MI2,
+ BuildFnTy &MatchInfo) const {
const GExtractVectorElement *Extract = cast<GExtractVectorElement>(&MI);
const GShuffleVector *Shuffle = cast<GShuffleVector>(&MI2);
@@ -338,7 +340,7 @@ bool CombinerHelper::matchExtractVectorElementWithShuffleVector(
}
bool CombinerHelper::matchInsertVectorElementOOB(MachineInstr &MI,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GInsertVectorElement *Insert = cast<GInsertVectorElement>(&MI);
Register Dst = Insert->getReg(0);
@@ -361,7 +363,7 @@ bool CombinerHelper::matchInsertVectorElementOOB(MachineInstr &MI,
}
bool CombinerHelper::matchAddOfVScale(const MachineOperand &MO,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GAdd *Add = cast<GAdd>(MRI.getVRegDef(MO.getReg()));
GVScale *LHSVScale = cast<GVScale>(MRI.getVRegDef(Add->getLHSReg()));
GVScale *RHSVScale = cast<GVScale>(MRI.getVRegDef(Add->getRHSReg()));
@@ -380,7 +382,7 @@ bool CombinerHelper::matchAddOfVScale(const MachineOperand &MO,
}
bool CombinerHelper::matchMulOfVScale(const MachineOperand &MO,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GMul *Mul = cast<GMul>(MRI.getVRegDef(MO.getReg()));
GVScale *LHSVScale = cast<GVScale>(MRI.getVRegDef(Mul->getLHSReg()));
@@ -401,7 +403,7 @@ bool CombinerHelper::matchMulOfVScale(const MachineOperand &MO,
}
bool CombinerHelper::matchSubOfVScale(const MachineOperand &MO,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GSub *Sub = cast<GSub>(MRI.getVRegDef(MO.getReg()));
GVScale *RHSVScale = cast<GVScale>(MRI.getVRegDef(Sub->getRHSReg()));
@@ -421,7 +423,7 @@ bool CombinerHelper::matchSubOfVScale(const MachineOperand &MO,
}
bool CombinerHelper::matchShlOfVScale(const MachineOperand &MO,
- BuildFnTy &MatchInfo) {
+ BuildFnTy &MatchInfo) const {
GShl *Shl = cast<GShl>(MRI.getVRegDef(MO.getReg()));
GVScale *LHSVScale = cast<GVScale>(MRI.getVRegDef(Shl->getSrcReg()));
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
index 13dd934543a709..d76918b9139846 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64O0PreLegalizerCombiner.cpp
@@ -41,8 +41,7 @@ namespace {
class AArch64O0PreLegalizerCombinerImpl : public Combiner {
protected:
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
const AArch64O0PreLegalizerCombinerImplRuleConfig &RuleConfig;
const AArch64Subtarget &STI;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
index 28d9f4f50f3883..cf6b2ce9c53416 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp
@@ -440,8 +440,7 @@ void applyCombineMulCMLT(MachineInstr &MI, MachineRegisterInfo &MRI,
class AArch64PostLegalizerCombinerImpl : public Combiner {
protected:
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig;
const AArch64Subtarget &STI;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index 56d70ffdece713..5fe2e3cefa1125 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -1243,8 +1243,7 @@ void applyExtMulToMULL(MachineInstr &MI, MachineRegisterInfo &MRI,
class AArch64PostLegalizerLoweringImpl : public Combiner {
protected:
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig;
const AArch64Subtarget &STI;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
index 80459827c30f3f..bbf1883925a690 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -605,7 +605,8 @@ void applyPushAddSubExt(MachineInstr &MI, MachineRegisterInfo &MRI,
}
bool tryToSimplifyUADDO(MachineInstr &MI, MachineIRBuilder &B,
- CombinerHelper &Helper, GISelChangeObserver &Observer) {
+ const CombinerHelper &Helper,
+ GISelChangeObserver &Observer) {
// Try simplify G_UADDO with 8 or 16 bit operands to wide G_ADD and TBNZ if
// result is only used in the no-overflow case. It is restricted to cases
// where we know that the high-bits of the operands are 0. If there's an
@@ -720,8 +721,7 @@ bool tryToSimplifyUADDO(MachineInstr &MI, MachineIRBuilder &B,
class AArch64PreLegalizerCombinerImpl : public Combiner {
protected:
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig;
const AArch64Subtarget &STI;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
index 1e31fa3218d9cb..98c48f4fe3705b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
@@ -48,8 +48,7 @@ class AMDGPURegBankCombinerImpl : public Combiner {
const RegisterBankInfo &RBI;
const TargetRegisterInfo &TRI;
const SIInstrInfo &TII;
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
public:
AMDGPURegBankCombinerImpl(
diff --git a/llvm/lib/Target/Mips/MipsPostLegalizerCombiner.cpp b/llvm/lib/Target/Mips/MipsPostLegalizerCombiner.cpp
index bd8a065011c928..56d47007cb1b01 100644
--- a/llvm/lib/Target/Mips/MipsPostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/Mips/MipsPostLegalizerCombiner.cpp
@@ -43,8 +43,7 @@ class MipsPostLegalizerCombinerImpl : public Combiner {
protected:
const MipsPostLegalizerCombinerImplRuleConfig &RuleConfig;
const MipsSubtarget &STI;
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
public:
MipsPostLegalizerCombinerImpl(
diff --git a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
index 0765233bfc3157..80c1a5eaa52dc9 100644
--- a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -37,8 +37,7 @@ struct MipsPreLegalizerCombinerInfo : public CombinerInfo {
class MipsPreLegalizerCombinerImpl : public Combiner {
protected:
const MipsSubtarget &STI;
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
public:
MipsPreLegalizerCombinerImpl(MachineFunction &MF, CombinerInfo &CInfo,
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVO0PreLegalizerCombiner.cpp b/llvm/lib/Target/RISCV/GISel/RISCVO0PreLegalizerCombiner.cpp
index a8c9d7bff154b6..aa44c0c15bb942 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVO0PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVO0PreLegalizerCombiner.cpp
@@ -38,8 +38,7 @@ namespace {
class RISCVO0PreLegalizerCombinerImpl : public Combiner {
protected:
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
const RISCVO0PreLegalizerCombinerImplRuleConfig &RuleConfig;
const RISCVSubtarget &STI;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVPostLegalizerCombiner.cpp b/llvm/lib/Target/RISCV/GISel/RISCVPostLegalizerCombiner.cpp
index 3814cee6015e68..c558ed66f3a15c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVPostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVPostLegalizerCombiner.cpp
@@ -44,8 +44,7 @@ namespace {
class RISCVPostLegalizerCombinerImpl : public Combiner {
protected:
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
const RISCVPostLegalizerCombinerImplRuleConfig &RuleConfig;
const RISCVSubtarget &STI;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVPreLegalizerCombiner.cpp b/llvm/lib/Target/RISCV/GISel/RISCVPreLegalizerCombiner.cpp
index 7e533e4bd798f0..efcb24706886ed 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVPreLegalizerCombiner.cpp
@@ -40,8 +40,7 @@ namespace {
class RISCVPreLegalizerCombinerImpl : public Combiner {
protected:
- // TODO: Make CombinerHelper methods const.
- mutable CombinerHelper Helper;
+ const CombinerHelper Helper;
const RISCVPreLegalizerCombinerImplRuleConfig &RuleConfig;
const RISCVSubtarget &STI;
More information about the llvm-commits
mailing list