[llvm] [CodeGen][NFC] Format RegisterCoalescer sources (PR #124697)
Akshat Oke via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 27 21:25:56 PST 2025
https://github.com/optimisan created https://github.com/llvm/llvm-project/pull/124697
Run clang-format over the RegisterCoalescer sources (RegisterCoalescer.cpp and RegisterCoalescer.h). No functional change intended.
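The changes are consistent with reformatting both files in place; a likely reproduction (the exact invocation is an assumption, it is not stated in the patch) is:

  clang-format -i llvm/lib/CodeGen/RegisterCoalescer.cpp llvm/lib/CodeGen/RegisterCoalescer.h

or, equivalently, running git clang-format against the parent commit.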
From c5b93264edff08bd779617339072048daffac9ec Mon Sep 17 00:00:00 2001
From: Akshat Oke <Akshat.Oke at amd.com>
Date: Mon, 27 Jan 2025 13:58:49 +0000
Subject: [PATCH] [CodeGen][NFC] Format RegisterCoalescer sources
---
llvm/lib/CodeGen/RegisterCoalescer.cpp | 852 +++++++++++++------------
llvm/lib/CodeGen/RegisterCoalescer.h | 128 ++--
2 files changed, 493 insertions(+), 487 deletions(-)
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 9d49b24ab15789..1970d550113b6a 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -62,15 +62,15 @@ using namespace llvm;
#define DEBUG_TYPE "regalloc"
-STATISTIC(numJoins , "Number of interval joins performed");
-STATISTIC(numCrossRCs , "Number of cross class joins performed");
-STATISTIC(numCommutes , "Number of instruction commuting performed");
-STATISTIC(numExtends , "Number of copies extended");
-STATISTIC(NumReMats , "Number of instructions re-materialized");
-STATISTIC(NumInflated , "Number of register classes inflated");
+STATISTIC(numJoins, "Number of interval joins performed");
+STATISTIC(numCrossRCs, "Number of cross class joins performed");
+STATISTIC(numCommutes, "Number of instruction commuting performed");
+STATISTIC(numExtends, "Number of copies extended");
+STATISTIC(NumReMats, "Number of instructions re-materialized");
+STATISTIC(NumInflated, "Number of register classes inflated");
STATISTIC(NumLaneConflicts, "Number of dead lane conflicts tested");
-STATISTIC(NumLaneResolves, "Number of dead lane conflicts resolved");
-STATISTIC(NumShrinkToUses, "Number of shrinkToUses called");
+STATISTIC(NumLaneResolves, "Number of dead lane conflicts resolved");
+STATISTIC(NumShrinkToUses, "Number of shrinkToUses called");
static cl::opt<bool> EnableJoining("join-liveintervals",
cl::desc("Coalesce copies (default=true)"),
@@ -81,20 +81,20 @@ static cl::opt<bool> UseTerminalRule("terminal-rule",
cl::init(false), cl::Hidden);
/// Temporary flag to test critical edge unsplitting.
-static cl::opt<bool>
-EnableJoinSplits("join-splitedges",
- cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden);
+static cl::opt<bool> EnableJoinSplits(
+ "join-splitedges",
+ cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden);
/// Temporary flag to test global copy optimization.
-static cl::opt<cl::boolOrDefault>
-EnableGlobalCopies("join-globalcopies",
- cl::desc("Coalesce copies that span blocks (default=subtarget)"),
- cl::init(cl::BOU_UNSET), cl::Hidden);
+static cl::opt<cl::boolOrDefault> EnableGlobalCopies(
+ "join-globalcopies",
+ cl::desc("Coalesce copies that span blocks (default=subtarget)"),
+ cl::init(cl::BOU_UNSET), cl::Hidden);
-static cl::opt<bool>
-VerifyCoalescing("verify-coalescing",
- cl::desc("Verify machine instrs before and after register coalescing"),
- cl::Hidden);
+static cl::opt<bool> VerifyCoalescing(
+ "verify-coalescing",
+ cl::desc("Verify machine instrs before and after register coalescing"),
+ cl::Hidden);
static cl::opt<unsigned> LateRematUpdateThreshold(
"late-remat-update-threshold", cl::Hidden,
@@ -120,287 +120,286 @@ static cl::opt<unsigned> LargeIntervalFreqThreshold(
namespace {
- class JoinVals;
-
- class RegisterCoalescer : public MachineFunctionPass,
- private LiveRangeEdit::Delegate {
- MachineFunction* MF = nullptr;
- MachineRegisterInfo* MRI = nullptr;
- const TargetRegisterInfo* TRI = nullptr;
- const TargetInstrInfo* TII = nullptr;
- LiveIntervals *LIS = nullptr;
- const MachineLoopInfo* Loops = nullptr;
- AliasAnalysis *AA = nullptr;
- RegisterClassInfo RegClassInfo;
-
- /// Position and VReg of a PHI instruction during coalescing.
- struct PHIValPos {
- SlotIndex SI; ///< Slot where this PHI occurs.
- Register Reg; ///< VReg the PHI occurs in.
- unsigned SubReg; ///< Qualifying subregister for Reg.
- };
-
- /// Map from debug instruction number to PHI position during coalescing.
- DenseMap<unsigned, PHIValPos> PHIValToPos;
- /// Index of, for each VReg, which debug instruction numbers and
- /// corresponding PHIs are sensitive to coalescing. Each VReg may have
- /// multiple PHI defs, at different positions.
- DenseMap<Register, SmallVector<unsigned, 2>> RegToPHIIdx;
-
- /// Debug variable location tracking -- for each VReg, maintain an
- /// ordered-by-slot-index set of DBG_VALUEs, to help quick
- /// identification of whether coalescing may change location validity.
- using DbgValueLoc = std::pair<SlotIndex, MachineInstr*>;
- DenseMap<Register, std::vector<DbgValueLoc>> DbgVRegToValues;
-
- /// A LaneMask to remember on which subregister live ranges we need to call
- /// shrinkToUses() later.
- LaneBitmask ShrinkMask;
-
- /// True if the main range of the currently coalesced intervals should be
- /// checked for smaller live intervals.
- bool ShrinkMainRange = false;
-
- /// True if the coalescer should aggressively coalesce global copies
- /// in favor of keeping local copies.
- bool JoinGlobalCopies = false;
-
- /// True if the coalescer should aggressively coalesce fall-thru
- /// blocks exclusively containing copies.
- bool JoinSplitEdges = false;
-
- /// Copy instructions yet to be coalesced.
- SmallVector<MachineInstr*, 8> WorkList;
- SmallVector<MachineInstr*, 8> LocalWorkList;
-
- /// Set of instruction pointers that have been erased, and
- /// that may be present in WorkList.
- SmallPtrSet<MachineInstr*, 8> ErasedInstrs;
-
- /// Dead instructions that are about to be deleted.
- SmallVector<MachineInstr*, 8> DeadDefs;
-
- /// Virtual registers to be considered for register class inflation.
- SmallVector<Register, 8> InflateRegs;
-
- /// The collection of live intervals which should have been updated
- /// immediately after rematerialiation but delayed until
- /// lateLiveIntervalUpdate is called.
- DenseSet<Register> ToBeUpdated;
-
- /// Record how many times the large live interval with many valnos
- /// has been tried to join with other live interval.
- DenseMap<Register, unsigned long> LargeLIVisitCounter;
-
- /// Recursively eliminate dead defs in DeadDefs.
- void eliminateDeadDefs(LiveRangeEdit *Edit = nullptr);
-
- /// LiveRangeEdit callback for eliminateDeadDefs().
- void LRE_WillEraseInstruction(MachineInstr *MI) override;
-
- /// Coalesce the LocalWorkList.
- void coalesceLocals();
-
- /// Join compatible live intervals
- void joinAllIntervals();
-
- /// Coalesce copies in the specified MBB, putting
- /// copies that cannot yet be coalesced into WorkList.
- void copyCoalesceInMBB(MachineBasicBlock *MBB);
-
- /// Tries to coalesce all copies in CurrList. Returns true if any progress
- /// was made.
- bool copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList);
-
- /// If one def has many copy like uses, and those copy uses are all
- /// rematerialized, the live interval update needed for those
- /// rematerializations will be delayed and done all at once instead
- /// of being done multiple times. This is to save compile cost because
- /// live interval update is costly.
- void lateLiveIntervalUpdate();
-
- /// Check if the incoming value defined by a COPY at \p SLRQ in the subrange
- /// has no value defined in the predecessors. If the incoming value is the
- /// same as defined by the copy itself, the value is considered undefined.
- bool copyValueUndefInPredecessors(LiveRange &S,
- const MachineBasicBlock *MBB,
- LiveQueryResult SLRQ);
-
- /// Set necessary undef flags on subregister uses after pruning out undef
- /// lane segments from the subrange.
- void setUndefOnPrunedSubRegUses(LiveInterval &LI, Register Reg,
- LaneBitmask PrunedLanes);
-
- /// Attempt to join intervals corresponding to SrcReg/DstReg, which are the
- /// src/dst of the copy instruction CopyMI. This returns true if the copy
- /// was successfully coalesced away. If it is not currently possible to
- /// coalesce this interval, but it may be possible if other things get
- /// coalesced, then it returns true by reference in 'Again'.
- bool joinCopy(MachineInstr *CopyMI, bool &Again,
- SmallPtrSetImpl<MachineInstr *> &CurrentErasedInstrs);
-
- /// Attempt to join these two intervals. On failure, this
- /// returns false. The output "SrcInt" will not have been modified, so we
- /// can use this information below to update aliases.
- bool joinIntervals(CoalescerPair &CP);
-
- /// Attempt joining two virtual registers. Return true on success.
- bool joinVirtRegs(CoalescerPair &CP);
-
- /// If a live interval has many valnos and is coalesced with other
- /// live intervals many times, we regard such live interval as having
- /// high compile time cost.
- bool isHighCostLiveInterval(LiveInterval &LI);
-
- /// Attempt joining with a reserved physreg.
- bool joinReservedPhysReg(CoalescerPair &CP);
-
- /// Add the LiveRange @p ToMerge as a subregister liverange of @p LI.
- /// Subranges in @p LI which only partially interfere with the desired
- /// LaneMask are split as necessary. @p LaneMask are the lanes that
- /// @p ToMerge will occupy in the coalescer register. @p LI has its subrange
- /// lanemasks already adjusted to the coalesced register.
- void mergeSubRangeInto(LiveInterval &LI, const LiveRange &ToMerge,
- LaneBitmask LaneMask, CoalescerPair &CP,
- unsigned DstIdx);
-
- /// Join the liveranges of two subregisters. Joins @p RRange into
- /// @p LRange, @p RRange may be invalid afterwards.
- void joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
- LaneBitmask LaneMask, const CoalescerPair &CP);
-
- /// We found a non-trivially-coalescable copy. If the source value number is
- /// defined by a copy from the destination reg see if we can merge these two
- /// destination reg valno# into a single value number, eliminating a copy.
- /// This returns true if an interval was modified.
- bool adjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
-
- /// Return true if there are definitions of IntB
- /// other than BValNo val# that can reach uses of AValno val# of IntA.
- bool hasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
- VNInfo *AValNo, VNInfo *BValNo);
-
- /// We found a non-trivially-coalescable copy.
- /// If the source value number is defined by a commutable instruction and
- /// its other operand is coalesced to the copy dest register, see if we
- /// can transform the copy into a noop by commuting the definition.
- /// This returns a pair of two flags:
- /// - the first element is true if an interval was modified,
- /// - the second element is true if the destination interval needs
- /// to be shrunk after deleting the copy.
- std::pair<bool,bool> removeCopyByCommutingDef(const CoalescerPair &CP,
- MachineInstr *CopyMI);
-
- /// We found a copy which can be moved to its less frequent predecessor.
- bool removePartialRedundancy(const CoalescerPair &CP, MachineInstr &CopyMI);
-
- /// If the source of a copy is defined by a
- /// trivial computation, replace the copy by rematerialize the definition.
- bool reMaterializeTrivialDef(const CoalescerPair &CP, MachineInstr *CopyMI,
- bool &IsDefCopy);
-
- /// Return true if a copy involving a physreg should be joined.
- bool canJoinPhys(const CoalescerPair &CP);
-
- /// Replace all defs and uses of SrcReg to DstReg and update the subregister
- /// number if it is not zero. If DstReg is a physical register and the
- /// existing subregister number of the def / use being updated is not zero,
- /// make sure to set it to the correct physical subregister.
- ///
- /// If \p IsSubregToReg, we are coalescing a DstReg = SUBREG_TO_REG
- /// SrcReg. This introduces an implicit-def of DstReg on coalesced users.
- void updateRegDefsUses(Register SrcReg, Register DstReg, unsigned SubIdx,
- bool IsSubregToReg);
-
- /// If the given machine operand reads only undefined lanes add an undef
- /// flag.
- /// This can happen when undef uses were previously concealed by a copy
- /// which we coalesced. Example:
- /// %0:sub0<def,read-undef> = ...
- /// %1 = COPY %0 <-- Coalescing COPY reveals undef
- /// = use %1:sub1 <-- hidden undef use
- void addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
- MachineOperand &MO, unsigned SubRegIdx);
-
- /// Handle copies of undef values. If the undef value is an incoming
- /// PHI value, it will convert @p CopyMI to an IMPLICIT_DEF.
- /// Returns nullptr if @p CopyMI was not in any way eliminable. Otherwise,
- /// it returns @p CopyMI (which could be an IMPLICIT_DEF at this point).
- MachineInstr *eliminateUndefCopy(MachineInstr *CopyMI);
-
- /// Check whether or not we should apply the terminal rule on the
- /// destination (Dst) of \p Copy.
- /// When the terminal rule applies, Copy is not profitable to
- /// coalesce.
- /// Dst is terminal if it has exactly one affinity (Dst, Src) and
- /// at least one interference (Dst, Dst2). If Dst is terminal, the
- /// terminal rule consists in checking that at least one of
- /// interfering node, say Dst2, has an affinity of equal or greater
- /// weight with Src.
- /// In that case, Dst2 and Dst will not be able to be both coalesced
- /// with Src. Since Dst2 exposes more coalescing opportunities than
- /// Dst, we can drop \p Copy.
- bool applyTerminalRule(const MachineInstr &Copy) const;
-
- /// Wrapper method for \see LiveIntervals::shrinkToUses.
- /// This method does the proper fixing of the live-ranges when the afore
- /// mentioned method returns true.
- void shrinkToUses(LiveInterval *LI,
- SmallVectorImpl<MachineInstr * > *Dead = nullptr) {
- NumShrinkToUses++;
- if (LIS->shrinkToUses(LI, Dead)) {
- /// Check whether or not \p LI is composed by multiple connected
- /// components and if that is the case, fix that.
- SmallVector<LiveInterval*, 8> SplitLIs;
- LIS->splitSeparateComponents(*LI, SplitLIs);
- }
- }
+class JoinVals;
+
+class RegisterCoalescer : public MachineFunctionPass,
+ private LiveRangeEdit::Delegate {
+ MachineFunction *MF = nullptr;
+ MachineRegisterInfo *MRI = nullptr;
+ const TargetRegisterInfo *TRI = nullptr;
+ const TargetInstrInfo *TII = nullptr;
+ LiveIntervals *LIS = nullptr;
+ const MachineLoopInfo *Loops = nullptr;
+ AliasAnalysis *AA = nullptr;
+ RegisterClassInfo RegClassInfo;
+
+ /// Position and VReg of a PHI instruction during coalescing.
+ struct PHIValPos {
+ SlotIndex SI; ///< Slot where this PHI occurs.
+ Register Reg; ///< VReg the PHI occurs in.
+ unsigned SubReg; ///< Qualifying subregister for Reg.
+ };
- /// Wrapper Method to do all the necessary work when an Instruction is
- /// deleted.
- /// Optimizations should use this to make sure that deleted instructions
- /// are always accounted for.
- void deleteInstr(MachineInstr* MI) {
- ErasedInstrs.insert(MI);
- LIS->RemoveMachineInstrFromMaps(*MI);
- MI->eraseFromParent();
+ /// Map from debug instruction number to PHI position during coalescing.
+ DenseMap<unsigned, PHIValPos> PHIValToPos;
+ /// Index of, for each VReg, which debug instruction numbers and
+ /// corresponding PHIs are sensitive to coalescing. Each VReg may have
+ /// multiple PHI defs, at different positions.
+ DenseMap<Register, SmallVector<unsigned, 2>> RegToPHIIdx;
+
+ /// Debug variable location tracking -- for each VReg, maintain an
+ /// ordered-by-slot-index set of DBG_VALUEs, to help quick
+ /// identification of whether coalescing may change location validity.
+ using DbgValueLoc = std::pair<SlotIndex, MachineInstr *>;
+ DenseMap<Register, std::vector<DbgValueLoc>> DbgVRegToValues;
+
+ /// A LaneMask to remember on which subregister live ranges we need to call
+ /// shrinkToUses() later.
+ LaneBitmask ShrinkMask;
+
+ /// True if the main range of the currently coalesced intervals should be
+ /// checked for smaller live intervals.
+ bool ShrinkMainRange = false;
+
+ /// True if the coalescer should aggressively coalesce global copies
+ /// in favor of keeping local copies.
+ bool JoinGlobalCopies = false;
+
+ /// True if the coalescer should aggressively coalesce fall-thru
+ /// blocks exclusively containing copies.
+ bool JoinSplitEdges = false;
+
+ /// Copy instructions yet to be coalesced.
+ SmallVector<MachineInstr *, 8> WorkList;
+ SmallVector<MachineInstr *, 8> LocalWorkList;
+
+ /// Set of instruction pointers that have been erased, and
+ /// that may be present in WorkList.
+ SmallPtrSet<MachineInstr *, 8> ErasedInstrs;
+
+ /// Dead instructions that are about to be deleted.
+ SmallVector<MachineInstr *, 8> DeadDefs;
+
+ /// Virtual registers to be considered for register class inflation.
+ SmallVector<Register, 8> InflateRegs;
+
+ /// The collection of live intervals which should have been updated
+ /// immediately after rematerialiation but delayed until
+ /// lateLiveIntervalUpdate is called.
+ DenseSet<Register> ToBeUpdated;
+
+ /// Record how many times the large live interval with many valnos
+ /// has been tried to join with other live interval.
+ DenseMap<Register, unsigned long> LargeLIVisitCounter;
+
+ /// Recursively eliminate dead defs in DeadDefs.
+ void eliminateDeadDefs(LiveRangeEdit *Edit = nullptr);
+
+ /// LiveRangeEdit callback for eliminateDeadDefs().
+ void LRE_WillEraseInstruction(MachineInstr *MI) override;
+
+ /// Coalesce the LocalWorkList.
+ void coalesceLocals();
+
+ /// Join compatible live intervals
+ void joinAllIntervals();
+
+ /// Coalesce copies in the specified MBB, putting
+ /// copies that cannot yet be coalesced into WorkList.
+ void copyCoalesceInMBB(MachineBasicBlock *MBB);
+
+ /// Tries to coalesce all copies in CurrList. Returns true if any progress
+ /// was made.
+ bool copyCoalesceWorkList(MutableArrayRef<MachineInstr *> CurrList);
+
+ /// If one def has many copy like uses, and those copy uses are all
+ /// rematerialized, the live interval update needed for those
+ /// rematerializations will be delayed and done all at once instead
+ /// of being done multiple times. This is to save compile cost because
+ /// live interval update is costly.
+ void lateLiveIntervalUpdate();
+
+ /// Check if the incoming value defined by a COPY at \p SLRQ in the subrange
+ /// has no value defined in the predecessors. If the incoming value is the
+ /// same as defined by the copy itself, the value is considered undefined.
+ bool copyValueUndefInPredecessors(LiveRange &S, const MachineBasicBlock *MBB,
+ LiveQueryResult SLRQ);
+
+ /// Set necessary undef flags on subregister uses after pruning out undef
+ /// lane segments from the subrange.
+ void setUndefOnPrunedSubRegUses(LiveInterval &LI, Register Reg,
+ LaneBitmask PrunedLanes);
+
+ /// Attempt to join intervals corresponding to SrcReg/DstReg, which are the
+ /// src/dst of the copy instruction CopyMI. This returns true if the copy
+ /// was successfully coalesced away. If it is not currently possible to
+ /// coalesce this interval, but it may be possible if other things get
+ /// coalesced, then it returns true by reference in 'Again'.
+ bool joinCopy(MachineInstr *CopyMI, bool &Again,
+ SmallPtrSetImpl<MachineInstr *> &CurrentErasedInstrs);
+
+ /// Attempt to join these two intervals. On failure, this
+ /// returns false. The output "SrcInt" will not have been modified, so we
+ /// can use this information below to update aliases.
+ bool joinIntervals(CoalescerPair &CP);
+
+ /// Attempt joining two virtual registers. Return true on success.
+ bool joinVirtRegs(CoalescerPair &CP);
+
+ /// If a live interval has many valnos and is coalesced with other
+ /// live intervals many times, we regard such live interval as having
+ /// high compile time cost.
+ bool isHighCostLiveInterval(LiveInterval &LI);
+
+ /// Attempt joining with a reserved physreg.
+ bool joinReservedPhysReg(CoalescerPair &CP);
+
+ /// Add the LiveRange @p ToMerge as a subregister liverange of @p LI.
+ /// Subranges in @p LI which only partially interfere with the desired
+ /// LaneMask are split as necessary. @p LaneMask are the lanes that
+ /// @p ToMerge will occupy in the coalescer register. @p LI has its subrange
+ /// lanemasks already adjusted to the coalesced register.
+ void mergeSubRangeInto(LiveInterval &LI, const LiveRange &ToMerge,
+ LaneBitmask LaneMask, CoalescerPair &CP,
+ unsigned DstIdx);
+
+ /// Join the liveranges of two subregisters. Joins @p RRange into
+ /// @p LRange, @p RRange may be invalid afterwards.
+ void joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
+ LaneBitmask LaneMask, const CoalescerPair &CP);
+
+ /// We found a non-trivially-coalescable copy. If the source value number is
+ /// defined by a copy from the destination reg see if we can merge these two
+ /// destination reg valno# into a single value number, eliminating a copy.
+ /// This returns true if an interval was modified.
+ bool adjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
+
+ /// Return true if there are definitions of IntB
+ /// other than BValNo val# that can reach uses of AValno val# of IntA.
+ bool hasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
+ VNInfo *AValNo, VNInfo *BValNo);
+
+ /// We found a non-trivially-coalescable copy.
+ /// If the source value number is defined by a commutable instruction and
+ /// its other operand is coalesced to the copy dest register, see if we
+ /// can transform the copy into a noop by commuting the definition.
+ /// This returns a pair of two flags:
+ /// - the first element is true if an interval was modified,
+ /// - the second element is true if the destination interval needs
+ /// to be shrunk after deleting the copy.
+ std::pair<bool, bool> removeCopyByCommutingDef(const CoalescerPair &CP,
+ MachineInstr *CopyMI);
+
+ /// We found a copy which can be moved to its less frequent predecessor.
+ bool removePartialRedundancy(const CoalescerPair &CP, MachineInstr &CopyMI);
+
+ /// If the source of a copy is defined by a
+ /// trivial computation, replace the copy by rematerialize the definition.
+ bool reMaterializeTrivialDef(const CoalescerPair &CP, MachineInstr *CopyMI,
+ bool &IsDefCopy);
+
+ /// Return true if a copy involving a physreg should be joined.
+ bool canJoinPhys(const CoalescerPair &CP);
+
+ /// Replace all defs and uses of SrcReg to DstReg and update the subregister
+ /// number if it is not zero. If DstReg is a physical register and the
+ /// existing subregister number of the def / use being updated is not zero,
+ /// make sure to set it to the correct physical subregister.
+ ///
+ /// If \p IsSubregToReg, we are coalescing a DstReg = SUBREG_TO_REG
+ /// SrcReg. This introduces an implicit-def of DstReg on coalesced users.
+ void updateRegDefsUses(Register SrcReg, Register DstReg, unsigned SubIdx,
+ bool IsSubregToReg);
+
+ /// If the given machine operand reads only undefined lanes add an undef
+ /// flag.
+ /// This can happen when undef uses were previously concealed by a copy
+ /// which we coalesced. Example:
+ /// %0:sub0<def,read-undef> = ...
+ /// %1 = COPY %0 <-- Coalescing COPY reveals undef
+ /// = use %1:sub1 <-- hidden undef use
+ void addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx,
+ MachineOperand &MO, unsigned SubRegIdx);
+
+ /// Handle copies of undef values. If the undef value is an incoming
+ /// PHI value, it will convert @p CopyMI to an IMPLICIT_DEF.
+ /// Returns nullptr if @p CopyMI was not in any way eliminable. Otherwise,
+ /// it returns @p CopyMI (which could be an IMPLICIT_DEF at this point).
+ MachineInstr *eliminateUndefCopy(MachineInstr *CopyMI);
+
+ /// Check whether or not we should apply the terminal rule on the
+ /// destination (Dst) of \p Copy.
+ /// When the terminal rule applies, Copy is not profitable to
+ /// coalesce.
+ /// Dst is terminal if it has exactly one affinity (Dst, Src) and
+ /// at least one interference (Dst, Dst2). If Dst is terminal, the
+ /// terminal rule consists in checking that at least one of
+ /// interfering node, say Dst2, has an affinity of equal or greater
+ /// weight with Src.
+ /// In that case, Dst2 and Dst will not be able to be both coalesced
+ /// with Src. Since Dst2 exposes more coalescing opportunities than
+ /// Dst, we can drop \p Copy.
+ bool applyTerminalRule(const MachineInstr &Copy) const;
+
+ /// Wrapper method for \see LiveIntervals::shrinkToUses.
+ /// This method does the proper fixing of the live-ranges when the afore
+ /// mentioned method returns true.
+ void shrinkToUses(LiveInterval *LI,
+ SmallVectorImpl<MachineInstr *> *Dead = nullptr) {
+ NumShrinkToUses++;
+ if (LIS->shrinkToUses(LI, Dead)) {
+ /// Check whether or not \p LI is composed by multiple connected
+ /// components and if that is the case, fix that.
+ SmallVector<LiveInterval *, 8> SplitLIs;
+ LIS->splitSeparateComponents(*LI, SplitLIs);
}
+ }
- /// Walk over function and initialize the DbgVRegToValues map.
- void buildVRegToDbgValueMap(MachineFunction &MF);
+ /// Wrapper Method to do all the necessary work when an Instruction is
+ /// deleted.
+ /// Optimizations should use this to make sure that deleted instructions
+ /// are always accounted for.
+ void deleteInstr(MachineInstr *MI) {
+ ErasedInstrs.insert(MI);
+ LIS->RemoveMachineInstrFromMaps(*MI);
+ MI->eraseFromParent();
+ }
- /// Test whether, after merging, any DBG_VALUEs would refer to a
- /// different value number than before merging, and whether this can
- /// be resolved. If not, mark the DBG_VALUE as being undef.
- void checkMergingChangesDbgValues(CoalescerPair &CP, LiveRange &LHS,
- JoinVals &LHSVals, LiveRange &RHS,
- JoinVals &RHSVals);
+ /// Walk over function and initialize the DbgVRegToValues map.
+ void buildVRegToDbgValueMap(MachineFunction &MF);
- void checkMergingChangesDbgValuesImpl(Register Reg, LiveRange &OtherRange,
- LiveRange &RegRange, JoinVals &Vals2);
+ /// Test whether, after merging, any DBG_VALUEs would refer to a
+ /// different value number than before merging, and whether this can
+ /// be resolved. If not, mark the DBG_VALUE as being undef.
+ void checkMergingChangesDbgValues(CoalescerPair &CP, LiveRange &LHS,
+ JoinVals &LHSVals, LiveRange &RHS,
+ JoinVals &RHSVals);
- public:
- static char ID; ///< Class identification, replacement for typeinfo
+ void checkMergingChangesDbgValuesImpl(Register Reg, LiveRange &OtherRange,
+ LiveRange &RegRange, JoinVals &Vals2);
- RegisterCoalescer() : MachineFunctionPass(ID) {
- initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
- }
+public:
+ static char ID; ///< Class identification, replacement for typeinfo
- void getAnalysisUsage(AnalysisUsage &AU) const override;
+ RegisterCoalescer() : MachineFunctionPass(ID) {
+ initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
+ }
- MachineFunctionProperties getClearedProperties() const override {
- return MachineFunctionProperties().set(
- MachineFunctionProperties::Property::IsSSA);
- }
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
- void releaseMemory() override;
+ MachineFunctionProperties getClearedProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::IsSSA);
+ }
- /// This is the pass entry point.
- bool runOnMachineFunction(MachineFunction&) override;
+ void releaseMemory() override;
- /// Implement the dump method.
- void print(raw_ostream &O, const Module* = nullptr) const override;
- };
+ /// This is the pass entry point.
+ bool runOnMachineFunction(MachineFunction &) override;
+
+ /// Implement the dump method.
+ void print(raw_ostream &O, const Module * = nullptr) const override;
+};
} // end anonymous namespace
@@ -421,20 +420,20 @@ INITIALIZE_PASS_END(RegisterCoalescer, "register-coalescer",
const MachineInstr *MI, Register &Src,
Register &Dst, unsigned &SrcSub,
unsigned &DstSub) {
- if (MI->isCopy()) {
- Dst = MI->getOperand(0).getReg();
- DstSub = MI->getOperand(0).getSubReg();
- Src = MI->getOperand(1).getReg();
- SrcSub = MI->getOperand(1).getSubReg();
- } else if (MI->isSubregToReg()) {
- Dst = MI->getOperand(0).getReg();
- DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
- MI->getOperand(3).getImm());
- Src = MI->getOperand(2).getReg();
- SrcSub = MI->getOperand(2).getSubReg();
- } else
- return false;
- return true;
+ if (MI->isCopy()) {
+ Dst = MI->getOperand(0).getReg();
+ DstSub = MI->getOperand(0).getSubReg();
+ Src = MI->getOperand(1).getReg();
+ SrcSub = MI->getOperand(1).getSubReg();
+ } else if (MI->isSubregToReg()) {
+ Dst = MI->getOperand(0).getReg();
+ DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
+ MI->getOperand(3).getImm());
+ Src = MI->getOperand(2).getReg();
+ SrcSub = MI->getOperand(2).getSubReg();
+ } else
+ return false;
+ return true;
}
/// Return true if this block should be vacated by the coalescer to eliminate
@@ -480,14 +479,16 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) {
// Eliminate DstSub on a physreg.
if (DstSub) {
Dst = TRI.getSubReg(Dst, DstSub);
- if (!Dst) return false;
+ if (!Dst)
+ return false;
DstSub = 0;
}
// Eliminate SrcSub by picking a corresponding Dst superregister.
if (SrcSub) {
Dst = TRI.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src));
- if (!Dst) return false;
+ if (!Dst)
+ return false;
} else if (!MRI.getRegClass(Src)->contains(Dst)) {
return false;
}
@@ -502,8 +503,8 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) {
if (Src == Dst && SrcSub != DstSub)
return false;
- NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub,
- SrcIdx, DstIdx);
+ NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub, SrcIdx,
+ DstIdx);
if (!NewRC)
return false;
} else if (DstSub) {
@@ -607,8 +608,8 @@ void RegisterCoalescer::eliminateDeadDefs(LiveRangeEdit *Edit) {
return;
}
SmallVector<Register, 8> NewRegs;
- LiveRangeEdit(nullptr, NewRegs, *MF, *LIS,
- nullptr, this).eliminateDeadDefs(DeadDefs);
+ LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, nullptr, this)
+ .eliminateDeadDefs(DeadDefs);
}
void RegisterCoalescer::LRE_WillEraseInstruction(MachineInstr *MI) {
@@ -622,9 +623,9 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
assert(!CP.isPhys() && "This doesn't work for physreg copies.");
LiveInterval &IntA =
- LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
+ LIS->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
LiveInterval &IntB =
- LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
+ LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
SlotIndex CopyIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot();
// We have a non-trivially-coalescable copy with IntA being the source and
@@ -644,19 +645,22 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
// BValNo is a value number in B that is defined by a copy from A. 'B1' in
// the example above.
LiveInterval::iterator BS = IntB.FindSegmentContaining(CopyIdx);
- if (BS == IntB.end()) return false;
+ if (BS == IntB.end())
+ return false;
VNInfo *BValNo = BS->valno;
// Get the location that B is defined at. Two options: either this value has
// an unknown definition point or it is defined at CopyIdx. If unknown, we
// can't process it.
- if (BValNo->def != CopyIdx) return false;
+ if (BValNo->def != CopyIdx)
+ return false;
// AValNo is the value number in A that defines the copy, A3 in the example.
SlotIndex CopyUseIdx = CopyIdx.getRegSlot(true);
LiveInterval::iterator AS = IntA.FindSegmentContaining(CopyUseIdx);
// The live segment might not exist after fun with physreg coalescing.
- if (AS == IntA.end()) return false;
+ if (AS == IntA.end())
+ return false;
VNInfo *AValNo = AS->valno;
// If AValNo is defined as a copy from IntB, we can potentially process this.
@@ -668,21 +672,22 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
// Get the Segment in IntB that this value number starts with.
LiveInterval::iterator ValS =
- IntB.FindSegmentContaining(AValNo->def.getPrevSlot());
+ IntB.FindSegmentContaining(AValNo->def.getPrevSlot());
if (ValS == IntB.end())
return false;
// Make sure that the end of the live segment is inside the same block as
// CopyMI.
MachineInstr *ValSEndInst =
- LIS->getInstructionFromIndex(ValS->end.getPrevSlot());
+ LIS->getInstructionFromIndex(ValS->end.getPrevSlot());
if (!ValSEndInst || ValSEndInst->getParent() != CopyMI->getParent())
return false;
// Okay, we now know that ValS ends in the same block that the CopyMI
// live-range starts. If there are no intervening live segments between them
// in IntB, we can merge them.
- if (ValS+1 != BS) return false;
+ if (ValS + 1 != BS)
+ return false;
LLVM_DEBUG(dbgs() << "Extending: " << printReg(IntB.reg(), TRI));
@@ -755,8 +760,7 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
}
bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA,
- LiveInterval &IntB,
- VNInfo *AValNo,
+ LiveInterval &IntB, VNInfo *AValNo,
VNInfo *BValNo) {
// If AValNo has PHI kills, conservatively assume that IntB defs can reach
// the PHI values.
@@ -764,7 +768,8 @@ bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA,
return true;
for (LiveRange::Segment &ASeg : IntA.segments) {
- if (ASeg.valno != AValNo) continue;
+ if (ASeg.valno != AValNo)
+ continue;
LiveInterval::iterator BI = llvm::upper_bound(IntB, ASeg.start);
if (BI != IntB.begin())
--BI;
@@ -782,9 +787,10 @@ bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA,
/// Copy segments with value number @p SrcValNo from liverange @p Src to live
/// range @Dst and use value number @p DstValNo there.
-static std::pair<bool,bool>
-addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo, const LiveRange &Src,
- const VNInfo *SrcValNo) {
+static std::pair<bool, bool> addSegmentsWithValNo(LiveRange &Dst,
+ VNInfo *DstValNo,
+ const LiveRange &Src,
+ const VNInfo *SrcValNo) {
bool Changed = false;
bool MergedWithDead = false;
for (const LiveRange::Segment &S : Src.segments) {
@@ -805,7 +811,7 @@ addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo, const LiveRange &Src,
return std::make_pair(Changed, MergedWithDead);
}
-std::pair<bool,bool>
+std::pair<bool, bool>
RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
MachineInstr *CopyMI) {
assert(!CP.isPhys());
@@ -845,19 +851,19 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getRegSlot(true));
assert(AValNo && !AValNo->isUnused() && "COPY source not live");
if (AValNo->isPHIDef())
- return { false, false };
+ return {false, false};
MachineInstr *DefMI = LIS->getInstructionFromIndex(AValNo->def);
if (!DefMI)
- return { false, false };
+ return {false, false};
if (!DefMI->isCommutable())
- return { false, false };
+ return {false, false};
// If DefMI is a two-address instruction then commuting it will change the
// destination register.
int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg(), /*TRI=*/nullptr);
assert(DefIdx != -1);
unsigned UseOpIdx;
if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
- return { false, false };
+ return {false, false};
// FIXME: The code below tries to commute 'UseOpIdx' operand with some other
// commutable operand which is expressed by 'CommuteAnyOperandIndex'value
@@ -870,17 +876,17 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
// op#2<->op#3) of commute transformation should be considered/tried here.
unsigned NewDstIdx = TargetInstrInfo::CommuteAnyOperandIndex;
if (!TII->findCommutedOpIndices(*DefMI, UseOpIdx, NewDstIdx))
- return { false, false };
+ return {false, false};
MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
Register NewReg = NewDstMO.getReg();
if (NewReg != IntB.reg() || !IntB.Query(AValNo->def).isKill())
- return { false, false };
+ return {false, false};
// Make sure there are no other definitions of IntB that would reach the
// uses which the new definition can reach.
if (hasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
- return { false, false };
+ return {false, false};
// If some of the uses of IntA.reg is already coalesced away, return false.
// It's not possible to determine whether it's safe to perform the coalescing.
@@ -893,7 +899,7 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
continue;
// If this use is tied to a def, we can't rewrite the register.
if (UseMI->isRegTiedToDefOperand(OpNo))
- return { false, false };
+ return {false, false};
}
LLVM_DEBUG(dbgs() << "\tremoveCopyByCommutingDef: " << AValNo->def << '\t'
@@ -905,10 +911,10 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
MachineInstr *NewMI =
TII->commuteInstruction(*DefMI, false, UseOpIdx, NewDstIdx);
if (!NewMI)
- return { false, false };
+ return {false, false};
if (IntA.reg().isVirtual() && IntB.reg().isVirtual() &&
!MRI->constrainRegClass(IntB.reg(), MRI->getRegClass(IntA.reg())))
- return { false, false };
+ return {false, false};
if (NewMI != DefMI) {
LIS->ReplaceMachineInstrInMaps(*DefMI, *NewMI);
MachineBasicBlock::iterator Pos = DefMI;
@@ -1039,7 +1045,7 @@ RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
LLVM_DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n');
++numCommutes;
- return { true, ShrinkB };
+ return {true, ShrinkB};
}
/// For copy B = A in BB2, if A is defined by A = B in BB0 which is a
@@ -1198,9 +1204,9 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
for (LiveInterval::SubRange &SR : IntB.subranges())
SR.createDeadDef(NewCopyIdx, LIS->getVNInfoAllocator());
- // If the newly created Instruction has an address of an instruction that was
- // deleted before (object recycled by the allocator) it needs to be removed from
- // the deleted list.
+ // If the newly created Instruction has an address of an instruction that
+ // was deleted before (object recycled by the allocator) it needs to be
+ // removed from the deleted list.
ErasedInstrs.erase(NewCopyMI);
} else {
LLVM_DEBUG(dbgs() << "\tremovePartialRedundancy: Remove the copy from "
@@ -1251,7 +1257,7 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
// to because the copy has been removed. We can go ahead and remove that
// endpoint; there is no other situation here that there could be a use at
// the same place as we know that the copy is a full copy.
- for (unsigned I = 0; I != EndPoints.size(); ) {
+ for (unsigned I = 0; I != EndPoints.size();) {
if (SlotIndex::isSameInstr(EndPoints[I], CopyIdx)) {
EndPoints[I] = EndPoints.back();
EndPoints.pop_back();
@@ -1394,7 +1400,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
DebugLoc DL = CopyMI->getDebugLoc();
MachineBasicBlock *MBB = CopyMI->getParent();
MachineBasicBlock::iterator MII =
- std::next(MachineBasicBlock::iterator(CopyMI));
+ std::next(MachineBasicBlock::iterator(CopyMI));
Edit.rematerializeAt(*MBB, MII, DstReg, RM, *TRI, false, SrcIdx, CopyMI);
MachineInstr &NewMI = *std::prev(MII);
NewMI.setDebugLoc(DL);
@@ -1408,11 +1414,11 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
if (DstIdx != 0) {
MachineOperand &DefMO = NewMI.getOperand(0);
if (DefMO.getSubReg() == DstIdx) {
- assert(SrcIdx == 0 && CP.isFlipped()
- && "Shouldn't have SrcIdx+DstIdx at this point");
+ assert(SrcIdx == 0 && CP.isFlipped() &&
+ "Shouldn't have SrcIdx+DstIdx at this point");
const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
const TargetRegisterClass *CommonRC =
- TRI->getCommonSubClass(DefRC, DstRC);
+ TRI->getCommonSubClass(DefRC, DstRC);
if (CommonRC != nullptr) {
NewRC = CommonRC;
@@ -1443,7 +1449,8 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
I != E; ++I) {
MachineOperand &MO = CopyMI->getOperand(I);
if (MO.isReg()) {
- assert(MO.isImplicit() && "No explicit operands after implicit operands.");
+ assert(MO.isImplicit() &&
+ "No explicit operands after implicit operands.");
assert((MO.getReg().isPhysical() ||
(MO.getSubReg() == 0 && MO.getReg() == DstOperand.getReg())) &&
"unexpected implicit virtual register def");
@@ -1568,7 +1575,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
SlotIndex DefIndex =
CurrIdx.getRegSlot(NewMI.getOperand(0).isEarlyClobber());
LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(DstReg);
- VNInfo::Allocator& Alloc = LIS->getVNInfoAllocator();
+ VNInfo::Allocator &Alloc = LIS->getVNInfoAllocator();
for (LiveInterval::SubRange &SR : DstInt.subranges()) {
if (!SR.liveAt(DefIndex))
SR.createDeadDef(DefIndex, Alloc);
@@ -1729,7 +1736,7 @@ MachineInstr *RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
// at this point.
Register SrcReg, DstReg;
unsigned SrcSubIdx = 0, DstSubIdx = 0;
- if(!isMoveInstr(*TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
+ if (!isMoveInstr(*TRI, CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
return nullptr;
SlotIndex Idx = LIS->getInstructionIndex(*CopyMI);
@@ -1758,20 +1765,20 @@ MachineInstr *RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
// copy introduced a live value.
if (((V && V->isPHIDef()) || (!V && !DstLI.liveAt(Idx)))) {
for (unsigned i = CopyMI->getNumOperands(); i != 0; --i) {
- MachineOperand &MO = CopyMI->getOperand(i-1);
+ MachineOperand &MO = CopyMI->getOperand(i - 1);
if (MO.isReg()) {
if (MO.isUse())
CopyMI->removeOperand(i - 1);
} else {
assert(MO.isImm() &&
CopyMI->getOpcode() == TargetOpcode::SUBREG_TO_REG);
- CopyMI->removeOperand(i-1);
+ CopyMI->removeOperand(i - 1);
}
}
CopyMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
LLVM_DEBUG(dbgs() << "\tReplaced copy of <undef> value with an "
- "implicit def\n");
+ "implicit def\n");
return CopyMI;
}
@@ -1891,10 +1898,10 @@ void RegisterCoalescer::updateRegDefsUses(Register SrcReg, Register DstReg,
DefinedLanes |= SR.LaneMask;
}
- SmallPtrSet<MachineInstr*, 8> Visited;
- for (MachineRegisterInfo::reg_instr_iterator
- I = MRI->reg_instr_begin(SrcReg), E = MRI->reg_instr_end();
- I != E; ) {
+ SmallPtrSet<MachineInstr *, 8> Visited;
+ for (MachineRegisterInfo::reg_instr_iterator I = MRI->reg_instr_begin(SrcReg),
+ E = MRI->reg_instr_end();
+ I != E;) {
MachineInstr *UseMI = &*(I++);
// Each instruction can only be rewritten once because sub-register
@@ -1905,7 +1912,7 @@ void RegisterCoalescer::updateRegDefsUses(Register SrcReg, Register DstReg,
if (SrcReg == DstReg && !Visited.insert(UseMI).second)
continue;
- SmallVector<unsigned,8> Ops;
+ SmallVector<unsigned, 8> Ops;
bool Reads, Writes;
std::tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
@@ -1946,8 +1953,8 @@ void RegisterCoalescer::updateRegDefsUses(Register SrcReg, Register DstReg,
DstInt->createSubRange(Allocator, UnusedLanes);
}
SlotIndex MIIdx = UseMI->isDebugInstr()
- ? LIS->getSlotIndexes()->getIndexBefore(*UseMI)
- : LIS->getInstructionIndex(*UseMI);
+ ? LIS->getSlotIndexes()->getIndexBefore(*UseMI)
+ : LIS->getInstructionIndex(*UseMI);
SlotIndex UseIdx = MIIdx.getRegSlot(true);
addUndefFlag(*DstInt, UseIdx, MO, SubUseIdx);
}
@@ -2100,7 +2107,7 @@ bool RegisterCoalescer::joinCopy(
if (UndefMI->isImplicitDef())
return false;
deleteInstr(CopyMI);
- return false; // Not coalescable.
+ return false; // Not coalescable.
}
}
@@ -2140,8 +2147,8 @@ bool RegisterCoalescer::joinCopy(
LI.MergeValueNumberInto(DefVNI, ReadVNI);
if (PrunedLanes.any()) {
- LLVM_DEBUG(dbgs() << "Pruning undef incoming lanes: "
- << PrunedLanes << '\n');
+ LLVM_DEBUG(dbgs() << "Pruning undef incoming lanes: " << PrunedLanes
+ << '\n');
setUndefOnPrunedSubRegUses(LI, CP.getSrcReg(), PrunedLanes);
}
@@ -2163,13 +2170,13 @@ bool RegisterCoalescer::joinCopy(
if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy))
return true;
if (IsDefCopy)
- Again = true; // May be possible to coalesce later.
+ Again = true; // May be possible to coalesce later.
return false;
}
} else {
// When possible, let DstReg be the larger interval.
if (!CP.isPartial() && LIS->getInterval(CP.getSrcReg()).size() >
- LIS->getInterval(CP.getDstReg()).size())
+ LIS->getInterval(CP.getDstReg()).size())
CP.flip();
LLVM_DEBUG({
@@ -2232,7 +2239,7 @@ bool RegisterCoalescer::joinCopy(
// Otherwise, we are unable to join the intervals.
LLVM_DEBUG(dbgs() << "\tInterference!\n");
- Again = true; // May be possible to coalesce later.
+ Again = true; // May be possible to coalesce later.
return false;
}
@@ -2521,7 +2528,7 @@ class JoinVals {
const bool TrackSubRegLiveness;
/// Values that will be present in the final live range.
- SmallVectorImpl<VNInfo*> &NewVNInfo;
+ SmallVectorImpl<VNInfo *> &NewVNInfo;
const CoalescerPair &CP;
LiveIntervals *LIS;
@@ -2532,7 +2539,7 @@ class JoinVals {
/// NewVNInfo. This is suitable for passing to LiveInterval::join().
SmallVector<int, 8> Assignments;
- public:
+public:
/// Conflict resolution for overlapping values.
enum ConflictResolution {
/// No overlap, simply keep this value.
@@ -2561,7 +2568,7 @@ class JoinVals {
CR_Impossible
};
- private:
+private:
/// Per-value info for LI. The lane bit masks are all relative to the final
/// joined register, so they can be compared directly between SrcReg and
/// DstReg.
@@ -2633,7 +2640,8 @@ class JoinVals {
/// Find the ultimate value that VNI was copied from.
std::pair<const VNInfo *, Register> followCopyChain(const VNInfo *VNI) const;
- bool valuesIdentical(VNInfo *Value0, VNInfo *Value1, const JoinVals &Other) const;
+ bool valuesIdentical(VNInfo *Value0, VNInfo *Value1,
+ const JoinVals &Other) const;
/// Analyze ValNo in this live range, and set all fields of Vals[ValNo].
/// Return a conflict resolution when possible, but leave the hard cases as
@@ -2723,7 +2731,7 @@ class JoinVals {
/// Add erased instructions to ErasedInstrs.
/// Add foreign virtual registers to ShrinkRegs if their live range ended at
/// the erased instrs.
- void eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
+ void eraseInstrs(SmallPtrSetImpl<MachineInstr *> &ErasedInstrs,
SmallVectorImpl<Register> &ShrinkRegs,
LiveInterval *LI = nullptr);
@@ -2741,14 +2749,14 @@ class JoinVals {
} // end anonymous namespace
-LaneBitmask JoinVals::computeWriteLanes(const MachineInstr *DefMI, bool &Redef)
- const {
+LaneBitmask JoinVals::computeWriteLanes(const MachineInstr *DefMI,
+ bool &Redef) const {
LaneBitmask L;
for (const MachineOperand &MO : DefMI->all_defs()) {
if (MO.getReg() != Reg)
continue;
L |= TRI->getSubRegIndexLaneMask(
- TRI->composeSubRegIndices(SubIdx, MO.getSubReg()));
+ TRI->composeSubRegIndices(SubIdx, MO.getSubReg()));
if (MO.readsReg())
Redef = true;
}
@@ -2832,8 +2840,8 @@ bool JoinVals::valuesIdentical(VNInfo *Value0, VNInfo *Value1,
return Orig0->def == Orig1->def && Reg0 == Reg1;
}
-JoinVals::ConflictResolution
-JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
+JoinVals::ConflictResolution JoinVals::analyzeValue(unsigned ValNo,
+ JoinVals &Other) {
Val &V = Vals[ValNo];
assert(!V.isAnalyzed() && "Value has already been analyzed!");
VNInfo *VNI = LR.getValNumInfo(ValNo);
@@ -2971,9 +2979,9 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
if (DefMI &&
(DefMI->getParent() != OtherMBB || LIS->isLiveInToMBB(LR, OtherMBB))) {
LLVM_DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
- << " extends into "
- << printMBBReference(*DefMI->getParent())
- << ", keeping it.\n");
+ << " extends into "
+ << printMBBReference(*DefMI->getParent())
+ << ", keeping it.\n");
OtherV.mustKeepImplicitDef(*TRI, *OtherImpDef);
} else if (OtherMBB->hasEHPadSuccessor()) {
// If OtherV is defined in a basic block that has EH pad successors then
@@ -3162,9 +3170,9 @@ bool JoinVals::mapValues(JoinVals &Other) {
return true;
}
-bool JoinVals::
-taintExtent(unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other,
- SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent) {
+bool JoinVals::taintExtent(
+ unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other,
+ SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent) {
VNInfo *VNI = LR.getValNumInfo(ValNo);
MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
SlotIndex MBBEnd = Indexes->getMBBEndIdx(MBB);
@@ -3225,8 +3233,8 @@ bool JoinVals::resolveConflicts(JoinVals &Other) {
if (V.Resolution != CR_Unresolved)
continue;
LLVM_DEBUG(dbgs() << "\t\tconflict at " << printReg(Reg) << ':' << i << '@'
- << LR.getValNumInfo(i)->def
- << ' ' << PrintLaneMask(LaneMask) << '\n');
+ << LR.getValNumInfo(i)->def << ' '
+ << PrintLaneMask(LaneMask) << '\n');
if (SubRangeJoin)
return false;
@@ -3259,7 +3267,7 @@ bool JoinVals::resolveConflicts(JoinVals &Other) {
assert(!SlotIndex::isSameInstr(VNI->def, TaintExtent.front().first) &&
"Interference ends on VNI->def. Should have been handled earlier");
MachineInstr *LastMI =
- Indexes->getInstructionFromIndex(TaintExtent.front().first);
+ Indexes->getInstructionFromIndex(TaintExtent.front().first);
assert(LastMI && "Range must end at a proper instruction");
unsigned TaintNum = 0;
while (true) {
@@ -3317,8 +3325,8 @@ void JoinVals::pruneValues(JoinVals &Other,
// predecessors, so the instruction should simply go away once its value
// has been replaced.
Val &OtherV = Other.Vals[Vals[i].OtherVNI->id];
- bool EraseImpDef = OtherV.ErasableImplicitDef &&
- OtherV.Resolution == CR_Keep;
+ bool EraseImpDef =
+ OtherV.ErasableImplicitDef && OtherV.Resolution == CR_Keep;
if (!Def.isBlock()) {
if (changeInstrs) {
// Remove <def,read-undef> flags. This def is now a partial redef.
@@ -3433,12 +3441,12 @@ void JoinVals::pruneSubRegValues(LiveInterval &LI, LaneBitmask &ShrinkMask) {
// If a subrange starts at the copy then an undefined value has been
// copied and we must remove that subrange value as well.
VNInfo *ValueOut = Q.valueOutOrDead();
- if (ValueOut != nullptr && (Q.valueIn() == nullptr ||
- (V.Identical && V.Resolution == CR_Erase &&
- ValueOut->def == Def))) {
+ if (ValueOut != nullptr &&
+ (Q.valueIn() == nullptr ||
+ (V.Identical && V.Resolution == CR_Erase && ValueOut->def == Def))) {
LLVM_DEBUG(dbgs() << "\t\tPrune sublane " << PrintLaneMask(S.LaneMask)
<< " at " << Def << "\n");
- SmallVector<SlotIndex,8> EndPoints;
+ SmallVector<SlotIndex, 8> EndPoints;
LIS->pruneValue(S, Def, &EndPoints);
DidPrune = true;
// Mark value number as unused.
@@ -3487,7 +3495,7 @@ static bool isDefInSubRange(LiveInterval &LI, SlotIndex Def) {
}
void JoinVals::pruneMainSegments(LiveInterval &LI, bool &ShrinkMainRange) {
- assert(&static_cast<LiveRange&>(LI) == &LR);
+ assert(&static_cast<LiveRange &>(LI) == &LR);
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
if (Vals[i].Resolution != CR_Keep)
@@ -3512,7 +3520,7 @@ void JoinVals::removeImplicitDefs() {
}
}
-void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
+void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr *> &ErasedInstrs,
SmallVectorImpl<Register> &ShrinkRegs,
LiveInterval *LI) {
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
@@ -3551,7 +3559,7 @@ void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
VNI->markUnused();
if (LI != nullptr && LI->hasSubRanges()) {
- assert(static_cast<LiveRange*>(LI) == &LR);
+ assert(static_cast<LiveRange *>(LI) == &LR);
// Determine the end point based on the subrange information:
// minimum of (earliest def of next segment,
// latest end point of containing segment)
@@ -3609,11 +3617,11 @@ void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
void RegisterCoalescer::joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
LaneBitmask LaneMask,
const CoalescerPair &CP) {
- SmallVector<VNInfo*, 16> NewVNInfo;
- JoinVals RHSVals(RRange, CP.getSrcReg(), CP.getSrcIdx(), LaneMask,
- NewVNInfo, CP, LIS, TRI, true, true);
- JoinVals LHSVals(LRange, CP.getDstReg(), CP.getDstIdx(), LaneMask,
- NewVNInfo, CP, LIS, TRI, true, true);
+ SmallVector<VNInfo *, 16> NewVNInfo;
+ JoinVals RHSVals(RRange, CP.getSrcReg(), CP.getSrcIdx(), LaneMask, NewVNInfo,
+ CP, LIS, TRI, true, true);
+ JoinVals LHSVals(LRange, CP.getDstReg(), CP.getDstIdx(), LaneMask, NewVNInfo,
+ CP, LIS, TRI, true, true);
// Compute NewVNInfo and resolve conflicts (see also joinVirtRegs())
// We should be able to resolve all conflicts here as we could successfully do
@@ -3649,8 +3657,8 @@ void RegisterCoalescer::joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
LRange.join(RRange, LHSVals.getAssignments(), RHSVals.getAssignments(),
NewVNInfo);
- LLVM_DEBUG(dbgs() << "\t\tjoined lanes: " << PrintLaneMask(LaneMask)
- << ' ' << LRange << "\n");
+ LLVM_DEBUG(dbgs() << "\t\tjoined lanes: " << PrintLaneMask(LaneMask) << ' '
+ << LRange << "\n");
if (EndPoints.empty())
return;
@@ -3660,7 +3668,7 @@ void RegisterCoalescer::joinSubRegRanges(LiveRange &LRange, LiveRange &RRange,
dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: ";
for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) {
dbgs() << EndPoints[i];
- if (i != n-1)
+ if (i != n - 1)
dbgs() << ',';
}
dbgs() << ": " << LRange << '\n';
@@ -3700,7 +3708,7 @@ bool RegisterCoalescer::isHighCostLiveInterval(LiveInterval &LI) {
}
bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
- SmallVector<VNInfo*, 16> NewVNInfo;
+ SmallVector<VNInfo *, 16> NewVNInfo;
LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
LiveInterval &LHS = LIS->getInterval(CP.getDstReg());
bool TrackSubRegLiveness = MRI->shouldTrackSubRegLiveness(*CP.getNewRC());
@@ -3869,12 +3877,12 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
dbgs() << "\t\trestoring liveness to " << EndPoints.size() << " points: ";
for (unsigned i = 0, n = EndPoints.size(); i != n; ++i) {
dbgs() << EndPoints[i];
- if (i != n-1)
+ if (i != n - 1)
dbgs() << ',';
}
dbgs() << ": " << LHS << '\n';
});
- LIS->extendToIndices((LiveRange&)LHS, EndPoints);
+ LIS->extendToIndices((LiveRange &)LHS, EndPoints);
}
return true;
@@ -3884,8 +3892,7 @@ bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) {
return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(CP);
}
-void RegisterCoalescer::buildVRegToDbgValueMap(MachineFunction &MF)
-{
+void RegisterCoalescer::buildVRegToDbgValueMap(MachineFunction &MF) {
const SlotIndexes &Slots = *LIS->getSlotIndexes();
SmallVector<MachineInstr *, 8> ToInsert;
@@ -3988,8 +3995,8 @@ void RegisterCoalescer::checkMergingChangesDbgValuesImpl(Register Reg,
// was coalesced and Reg deleted. It's safe to refer to the other register
// (which will be the source of the copy).
auto Resolution = RegVals.getResolution(OtherIt->valno->id);
- LastUndefResult = Resolution != JoinVals::CR_Keep &&
- Resolution != JoinVals::CR_Erase;
+ LastUndefResult =
+ Resolution != JoinVals::CR_Keep && Resolution != JoinVals::CR_Erase;
LastUndefIdx = Idx;
return LastUndefResult;
};
@@ -4026,7 +4033,7 @@ struct MBBPriorityInfo {
bool IsSplit;
MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit)
- : MBB(mbb), Depth(depth), IsSplit(issplit) {}
+ : MBB(mbb), Depth(depth), IsSplit(issplit) {}
};
} // end anonymous namespace
@@ -4069,8 +4076,8 @@ static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) {
if (SrcReg.isPhysical() || DstReg.isPhysical())
return false;
- return LIS->intervalIsInOneMBB(LIS->getInterval(SrcReg))
- || LIS->intervalIsInOneMBB(LIS->getInterval(DstReg));
+ return LIS->intervalIsInOneMBB(LIS->getInterval(SrcReg)) ||
+ LIS->intervalIsInOneMBB(LIS->getInterval(DstReg));
}
void RegisterCoalescer::lateLiveIntervalUpdate() {
@@ -4085,8 +4092,8 @@ void RegisterCoalescer::lateLiveIntervalUpdate() {
ToBeUpdated.clear();
}
-bool RegisterCoalescer::
-copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList) {
+bool RegisterCoalescer::copyCoalesceWorkList(
+ MutableArrayRef<MachineInstr *> CurrList) {
bool Progress = false;
SmallPtrSet<MachineInstr *, 4> CurrentErasedInstrs;
for (MachineInstr *&MI : CurrList) {
@@ -4162,7 +4169,7 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
Register OtherSrcReg, OtherReg;
unsigned OtherSrcSubReg = 0, OtherSubReg = 0;
if (!isMoveInstr(*TRI, &Copy, OtherSrcReg, OtherReg, OtherSrcSubReg,
- OtherSubReg))
+ OtherSubReg))
return false;
if (OtherReg == SrcReg)
OtherReg = OtherSrcReg;
@@ -4179,16 +4186,15 @@ bool RegisterCoalescer::applyTerminalRule(const MachineInstr &Copy) const {
return false;
}
-void
-RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
+void RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
LLVM_DEBUG(dbgs() << MBB->getName() << ":\n");
// Collect all copy-like instructions in MBB. Don't start coalescing anything
// yet, it might invalidate the iterator.
const unsigned PrevSize = WorkList.size();
if (JoinGlobalCopies) {
- SmallVector<MachineInstr*, 2> LocalTerminals;
- SmallVector<MachineInstr*, 2> GlobalTerminals;
+ SmallVector<MachineInstr *, 2> LocalTerminals;
+ SmallVector<MachineInstr *, 2> GlobalTerminals;
// Coalesce copies bottom-up to coalesce local defs before local uses. They
// are not inherently easier to resolve, but slightly preferable until we
// have local live range splitting. In particular this is required by
@@ -4212,9 +4218,8 @@ RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
// Append the copies evicted by the terminal rule at the end of the list.
LocalWorkList.append(LocalTerminals.begin(), LocalTerminals.end());
WorkList.append(GlobalTerminals.begin(), GlobalTerminals.end());
- }
- else {
- SmallVector<MachineInstr*, 2> Terminals;
+ } else {
+ SmallVector<MachineInstr *, 2> Terminals;
for (MachineInstr &MII : *MBB)
if (MII.isCopyLike()) {
if (applyTerminalRule(MII))
@@ -4228,11 +4233,12 @@ RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
// Try coalescing the collected copies immediately, and remove the nulls.
// This prevents the WorkList from getting too large since most copies are
// joinable on the first attempt.
- MutableArrayRef<MachineInstr*>
- CurrList(WorkList.begin() + PrevSize, WorkList.end());
+ MutableArrayRef<MachineInstr *> CurrList(WorkList.begin() + PrevSize,
+ WorkList.end());
if (copyCoalesceWorkList(CurrList))
- WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(),
- nullptr), WorkList.end());
+ WorkList.erase(
+ std::remove(WorkList.begin() + PrevSize, WorkList.end(), nullptr),
+ WorkList.end());
}
void RegisterCoalescer::coalesceLocals() {
@@ -4272,7 +4278,7 @@ void RegisterCoalescer::joinAllIntervals() {
// Joining intervals can allow other intervals to be joined. Iteratively join
// until we make no progress.
while (copyCoalesceWorkList(WorkList))
- /* empty */ ;
+ /* empty */;
lateLiveIntervalUpdate();
}
@@ -4398,6 +4404,6 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
return true;
}
-void RegisterCoalescer::print(raw_ostream &O, const Module* m) const {
+void RegisterCoalescer::print(raw_ostream &O, const Module *m) const {
LIS->print(O);
}
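
For readers skimming the reformatted hunks above: copyCoalesceInMBB and copyCoalesceWorkList follow a collect-then-join worklist idiom (gather the copy-like instructions first so the block iterator stays valid, null out the entries that were successfully joined, then compact the list with the erase/remove idiom shown in the hunk). The following is a minimal, self-contained sketch of that idiom only; CopyInstr and tryJoin are illustrative stand-ins, not LLVM types or APIs.

#include <algorithm>
#include <cstdio>
#include <vector>

// Stand-in for a copy-like instruction; not an LLVM type.
struct CopyInstr {
  int Id;
  bool Joinable;
};

// Stand-in for joinCopy(): returns true if the copy was coalesced away.
static bool tryJoin(CopyInstr *C) { return C->Joinable; }

int main() {
  // 1) Collect first; coalescing while walking the block would invalidate
  //    the instruction iterator, so the work is deferred to a list.
  std::vector<CopyInstr> Block = {{0, true}, {1, false}, {2, true}};
  std::vector<CopyInstr *> WorkList;
  for (CopyInstr &C : Block)
    WorkList.push_back(&C);

  // 2) Try each copy once; null out the entries that were joined so only
  //    copies worth retrying later remain.
  bool Progress = false;
  for (CopyInstr *&C : WorkList)
    if (tryJoin(C)) {
      C = nullptr;
      Progress = true;
    }

  // 3) Compact with the erase/remove idiom, mirroring the
  //    WorkList.erase(std::remove(...), WorkList.end()) call above.
  if (Progress)
    WorkList.erase(std::remove(WorkList.begin(), WorkList.end(), nullptr),
                   WorkList.end());

  std::printf("copies left to retry: %zu\n", WorkList.size());
  return 0;
}
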
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.h b/llvm/lib/CodeGen/RegisterCoalescer.h
index f265d93fb0d63d..6926e9b5d188f0 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.h
+++ b/llvm/lib/CodeGen/RegisterCoalescer.h
@@ -22,92 +22,92 @@ class MachineInstr;
class TargetRegisterClass;
class TargetRegisterInfo;
- /// A helper class for register coalescers. When deciding if
- /// two registers can be coalesced, CoalescerPair can determine if a copy
- /// instruction would become an identity copy after coalescing.
- class CoalescerPair {
- const TargetRegisterInfo &TRI;
+/// A helper class for register coalescers. When deciding if
+/// two registers can be coalesced, CoalescerPair can determine if a copy
+/// instruction would become an identity copy after coalescing.
+class CoalescerPair {
+ const TargetRegisterInfo &TRI;
- /// The register that will be left after coalescing. It can be a
- /// virtual or physical register.
- Register DstReg;
+ /// The register that will be left after coalescing. It can be a
+ /// virtual or physical register.
+ Register DstReg;
- /// The virtual register that will be coalesced into dstReg.
- Register SrcReg;
+ /// The virtual register that will be coalesced into dstReg.
+ Register SrcReg;
- /// The sub-register index of the old DstReg in the new coalesced register.
- unsigned DstIdx = 0;
+ /// The sub-register index of the old DstReg in the new coalesced register.
+ unsigned DstIdx = 0;
- /// The sub-register index of the old SrcReg in the new coalesced register.
- unsigned SrcIdx = 0;
+ /// The sub-register index of the old SrcReg in the new coalesced register.
+ unsigned SrcIdx = 0;
- /// True when the original copy was a partial subregister copy.
- bool Partial = false;
+ /// True when the original copy was a partial subregister copy.
+ bool Partial = false;
- /// True when both regs are virtual and newRC is constrained.
- bool CrossClass = false;
+ /// True when both regs are virtual and newRC is constrained.
+ bool CrossClass = false;
- /// True when DstReg and SrcReg are reversed from the original
- /// copy instruction.
- bool Flipped = false;
+ /// True when DstReg and SrcReg are reversed from the original
+ /// copy instruction.
+ bool Flipped = false;
- /// The register class of the coalesced register, or NULL if DstReg
- /// is a physreg. This register class may be a super-register of both
- /// SrcReg and DstReg.
- const TargetRegisterClass *NewRC = nullptr;
+ /// The register class of the coalesced register, or NULL if DstReg
+ /// is a physreg. This register class may be a super-register of both
+ /// SrcReg and DstReg.
+ const TargetRegisterClass *NewRC = nullptr;
- public:
- CoalescerPair(const TargetRegisterInfo &tri) : TRI(tri) {}
+public:
+ CoalescerPair(const TargetRegisterInfo &tri) : TRI(tri) {}
- /// Create a CoalescerPair representing a virtreg-to-physreg copy.
- /// No need to call setRegisters().
- CoalescerPair(Register VirtReg, MCRegister PhysReg,
- const TargetRegisterInfo &tri)
- : TRI(tri), DstReg(PhysReg), SrcReg(VirtReg) {}
+ /// Create a CoalescerPair representing a virtreg-to-physreg copy.
+ /// No need to call setRegisters().
+ CoalescerPair(Register VirtReg, MCRegister PhysReg,
+ const TargetRegisterInfo &tri)
+ : TRI(tri), DstReg(PhysReg), SrcReg(VirtReg) {}
- /// Set registers to match the copy instruction MI. Return
- /// false if MI is not a coalescable copy instruction.
- bool setRegisters(const MachineInstr*);
+ /// Set registers to match the copy instruction MI. Return
+ /// false if MI is not a coalescable copy instruction.
+ bool setRegisters(const MachineInstr *);
- /// Swap SrcReg and DstReg. Return false if swapping is impossible
- /// because DstReg is a physical register, or SubIdx is set.
- bool flip();
+ /// Swap SrcReg and DstReg. Return false if swapping is impossible
+ /// because DstReg is a physical register, or SubIdx is set.
+ bool flip();
- /// Return true if MI is a copy instruction that will become
- /// an identity copy after coalescing.
- bool isCoalescable(const MachineInstr*) const;
+ /// Return true if MI is a copy instruction that will become
+ /// an identity copy after coalescing.
+ bool isCoalescable(const MachineInstr *) const;
- /// Return true if DstReg is a physical register.
- bool isPhys() const { return !NewRC; }
+ /// Return true if DstReg is a physical register.
+ bool isPhys() const { return !NewRC; }
- /// Return true if the original copy instruction did not copy
- /// the full register, but was a subreg operation.
- bool isPartial() const { return Partial; }
+ /// Return true if the original copy instruction did not copy
+ /// the full register, but was a subreg operation.
+ bool isPartial() const { return Partial; }
- /// Return true if DstReg is virtual and NewRC is a smaller
- /// register class than DstReg's.
- bool isCrossClass() const { return CrossClass; }
+ /// Return true if DstReg is virtual and NewRC is a smaller
+ /// register class than DstReg's.
+ bool isCrossClass() const { return CrossClass; }
- /// Return true when getSrcReg is the register being defined by
- /// the original copy instruction.
- bool isFlipped() const { return Flipped; }
+ /// Return true when getSrcReg is the register being defined by
+ /// the original copy instruction.
+ bool isFlipped() const { return Flipped; }
- /// Return the register (virtual or physical) that will remain
- /// after coalescing.
- Register getDstReg() const { return DstReg; }
+ /// Return the register (virtual or physical) that will remain
+ /// after coalescing.
+ Register getDstReg() const { return DstReg; }
- /// Return the virtual register that will be coalesced away.
- Register getSrcReg() const { return SrcReg; }
+ /// Return the virtual register that will be coalesced away.
+ Register getSrcReg() const { return SrcReg; }
- /// Return the subregister index that DstReg will be coalesced into, or 0.
- unsigned getDstIdx() const { return DstIdx; }
+ /// Return the subregister index that DstReg will be coalesced into, or 0.
+ unsigned getDstIdx() const { return DstIdx; }
- /// Return the subregister index that SrcReg will be coalesced into, or 0.
- unsigned getSrcIdx() const { return SrcIdx; }
+ /// Return the subregister index that SrcReg will be coalesced into, or 0.
+ unsigned getSrcIdx() const { return SrcIdx; }
- /// Return the register class of the coalesced register.
- const TargetRegisterClass *getNewRC() const { return NewRC; }
- };
+ /// Return the register class of the coalesced register.
+ const TargetRegisterClass *getNewRC() const { return NewRC; }
+};
} // end namespace llvm
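
As a reading aid for the header reformat above, here is a hedged sketch of how a client inside lib/CodeGen might exercise the CoalescerPair queries declared there. describeCopy is a hypothetical helper, not part of this patch; it assumes the usual CodeGen headers plus the private RegisterCoalescer.h are reachable, so it is a fragment for use inside lib/CodeGen rather than a standalone program.

#include "RegisterCoalescer.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical helper: classify a copy-like instruction using only the
// CoalescerPair queries declared in the header above.
static void describeCopy(const MachineInstr &MI,
                         const TargetRegisterInfo &TRI, raw_ostream &OS) {
  CoalescerPair CP(TRI);
  // setRegisters() rejects anything that is not a coalescable copy.
  if (!CP.setRegisters(&MI)) {
    OS << "not a coalescable copy\n";
    return;
  }
  // After a join, DstReg survives and SrcReg is coalesced away.
  OS << "join keeps " << printReg(CP.getDstReg(), &TRI) << ", erases "
     << printReg(CP.getSrcReg(), &TRI) << '\n';
  if (CP.isCoalescable(&MI))
    OS << "  copy would become an identity copy\n";
  if (CP.isPhys())
    OS << "  destination is a physical register\n";
  else if (CP.isCrossClass())
    OS << "  cross-class join, constrained to "
       << TRI.getRegClassName(CP.getNewRC()) << '\n';
}
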