[llvm-branch-commits] [llvm-branch] r86703 - in /llvm/branches/Apple/Leela: include/llvm/Target/TargetSubtarget.h lib/CodeGen/AggressiveAntiDepBreaker.cpp lib/CodeGen/AggressiveAntiDepBreaker.h lib/CodeGen/PostRASchedulerList.cpp lib/CodeGen/ScheduleDAGInstrs.cpp lib/Target/ARM/ARMSubtarget.cpp lib/Target/ARM/ARMSubtarget.h lib/Target/TargetSubtarget.cpp lib/Target/X86/X86Subtarget.cpp lib/Target/X86/X86Subtarget.h
Bill Wendling
isanbard at gmail.com
Tue Nov 10 11:31:20 PST 2009
Author: void
Date: Tue Nov 10 13:31:19 2009
New Revision: 86703
URL: http://llvm.org/viewvc/llvm-project?rev=86703&view=rev
Log:
$ svn merge -c 86580 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r86580 into '.':
U lib/CodeGen/ScheduleDAGInstrs.cpp
$ svn merge -c 86628 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r86628 into '.':
U include/llvm/Target/TargetSubtarget.h
U lib/CodeGen/AggressiveAntiDepBreaker.cpp
U lib/CodeGen/PostRASchedulerList.cpp
U lib/CodeGen/AggressiveAntiDepBreaker.h
U lib/Target/ARM/ARMSubtarget.h
U lib/Target/X86/X86Subtarget.h
$ svn merge -c 86634 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r86634 into '.':
G include/llvm/Target/TargetSubtarget.h
G lib/CodeGen/PostRASchedulerList.cpp
U lib/Target/ARM/ARMSubtarget.cpp
G lib/Target/ARM/ARMSubtarget.h
G lib/Target/X86/X86Subtarget.h
U lib/Target/X86/X86Subtarget.cpp
U lib/Target/TargetSubtarget.cpp
Modified:
llvm/branches/Apple/Leela/include/llvm/Target/TargetSubtarget.h
llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.cpp
llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.h
llvm/branches/Apple/Leela/lib/CodeGen/PostRASchedulerList.cpp
llvm/branches/Apple/Leela/lib/CodeGen/ScheduleDAGInstrs.cpp
llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.cpp
llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.h
llvm/branches/Apple/Leela/lib/Target/TargetSubtarget.cpp
llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.cpp
llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.h
Modified: llvm/branches/Apple/Leela/include/llvm/Target/TargetSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/include/llvm/Target/TargetSubtarget.h?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/include/llvm/Target/TargetSubtarget.h (original)
+++ llvm/branches/Apple/Leela/include/llvm/Target/TargetSubtarget.h Tue Nov 10 13:31:19 2009
@@ -20,6 +20,8 @@
class SDep;
class SUnit;
+class TargetRegisterClass;
+template <typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
///
@@ -36,6 +38,7 @@
// AntiDepBreakMode - Type of anti-dependence breaking that should
// be performed before post-RA scheduling.
typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode;
+ typedef SmallVectorImpl<TargetRegisterClass*> ExcludedRCVector;
virtual ~TargetSubtarget();
@@ -49,11 +52,8 @@
// scheduling and the specified optimization level meets the requirement
// return true to enable post-register-allocation scheduling.
virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- AntiDepBreakMode& mode) const {
- mode = ANTIDEP_NONE;
- return false;
- }
-
+ AntiDepBreakMode& Mode,
+ ExcludedRCVector& ExcludedRCs) const;
// adjustSchedDependency - Perform target specific adjustments to
// the latency of a schedule dependency.
virtual void adjustSchedDependency(SUnit *def, SUnit *use,
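With this header change, a target reports both its anti-dependence breaking mode and the register classes whose members must not be renamed. A minimal sketch of an override under the new signature, using a hypothetical MyTargetSubtarget (the ARM and X86 overrides later in this commit follow the same pattern):

    bool MyTargetSubtarget::enablePostRAScheduler(
        CodeGenOpt::Level OptLevel,
        TargetSubtarget::AntiDepBreakMode &Mode,
        TargetSubtarget::ExcludedRCVector &ExcludedRCs) const {
      // Ask for critical-path-only anti-dependence breaking.
      Mode = TargetSubtarget::ANTIDEP_CRITICAL;
      // Start from an empty exclusion list; a target may then push register
      // classes it does not want renamed (ARM pushes &ARM::GPRRegClass below).
      ExcludedRCs.clear();
      // Enable post-RA scheduling at 'Default' (-O2) and above.
      return OptLevel >= CodeGenOpt::Default;
    }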
Modified: llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.cpp?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.cpp (original)
+++ llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.cpp Tue Nov 10 13:31:19 2009
@@ -99,12 +99,24 @@
AggressiveAntiDepBreaker::
-AggressiveAntiDepBreaker(MachineFunction& MFi) :
+AggressiveAntiDepBreaker(MachineFunction& MFi,
+ TargetSubtarget::ExcludedRCVector& ExcludedRCs) :
AntiDepBreaker(), MF(MFi),
MRI(MF.getRegInfo()),
TRI(MF.getTarget().getRegisterInfo()),
AllocatableSet(TRI->getAllocatableSet(MF)),
State(NULL), SavedState(NULL) {
+ /* Remove all registers from excluded RCs from the allocatable
+ register set. */
+ for (unsigned i = 0, e = ExcludedRCs.size(); i < e; ++i) {
+ BitVector NotRenameable = TRI->getAllocatableSet(MF, ExcludedRCs[i]).flip();
+ AllocatableSet &= NotRenameable;
+ }
+
+ DEBUG(errs() << "AntiDep Renameable Registers:");
+ DEBUG(for (int r = AllocatableSet.find_first(); r != -1;
+ r = AllocatableSet.find_next(r))
+ errs() << " " << TRI->getName(r));
}
AggressiveAntiDepBreaker::~AggressiveAntiDepBreaker() {
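The constructor change above drops every register belonging to an excluded class from the renaming candidates by AND-ing AllocatableSet with the complement of that class's allocatable set. The same BitVector idiom in isolation, as a sketch rather than code from this commit:

    #include "llvm/ADT/BitVector.h"

    // Remove the members of 'Excluded' from 'Allocatable': flip() builds the
    // complement of the excluded set, &= keeps only registers outside it.
    static void removeExcludedRegs(llvm::BitVector &Allocatable,
                                   llvm::BitVector Excluded) {
      Excluded.flip();
      Allocatable &= Excluded;
    }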
Modified: llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.h?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.h (original)
+++ llvm/branches/Apple/Leela/lib/CodeGen/AggressiveAntiDepBreaker.h Tue Nov 10 13:31:19 2009
@@ -23,6 +23,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
@@ -112,7 +113,7 @@
/// AllocatableSet - The set of allocatable registers.
/// We'll be ignoring anti-dependencies on non-allocatable registers,
/// because they may not be safe to break.
- const BitVector AllocatableSet;
+ BitVector AllocatableSet;
/// State - The state used to identify and rename anti-dependence
/// registers.
@@ -124,7 +125,8 @@
AggressiveAntiDepState *SavedState;
public:
- AggressiveAntiDepBreaker(MachineFunction& MFi);
+ AggressiveAntiDepBreaker(MachineFunction& MFi,
+ TargetSubtarget::ExcludedRCVector& ExcludedRCs);
~AggressiveAntiDepBreaker();
/// GetMaxTrials - As anti-dependencies are broken, additional
Modified: llvm/branches/Apple/Leela/lib/CodeGen/PostRASchedulerList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/CodeGen/PostRASchedulerList.cpp?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/lib/CodeGen/PostRASchedulerList.cpp (original)
+++ llvm/branches/Apple/Leela/lib/CodeGen/PostRASchedulerList.cpp Tue Nov 10 13:31:19 2009
@@ -216,13 +216,14 @@
// Check for explicit enable/disable of post-ra scheduling.
TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
+ SmallVector<TargetRegisterClass*, 4> ExcludedRCs;
if (EnablePostRAScheduler.getPosition() > 0) {
if (!EnablePostRAScheduler)
return false;
} else {
// Check that post-RA scheduling is enabled for this target.
const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
- if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode))
+ if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, ExcludedRCs))
return false;
}
@@ -243,7 +244,7 @@
(ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
AntiDepBreaker *ADB =
((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
- (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn) :
+ (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, ExcludedRCs) :
((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
(AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));
Modified: llvm/branches/Apple/Leela/lib/CodeGen/ScheduleDAGInstrs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/CodeGen/ScheduleDAGInstrs.cpp?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/lib/CodeGen/ScheduleDAGInstrs.cpp (original)
+++ llvm/branches/Apple/Leela/lib/CodeGen/ScheduleDAGInstrs.cpp Tue Nov 10 13:31:19 2009
@@ -112,12 +112,13 @@
V = getUnderlyingObject(V);
if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
- MayAlias = PSV->mayAlias(MFI);
// For now, ignore PseudoSourceValues which may alias LLVM IR values
// because the code that uses this function has no way to cope with
// such aliases.
if (PSV->isAliased(MFI))
return 0;
+
+ MayAlias = PSV->mayAlias(MFI);
return V;
}
@@ -127,23 +128,6 @@
return 0;
}
-static bool mayUnderlyingObjectForInstrAlias(const MachineInstr *MI,
- const MachineFrameInfo *MFI) {
- if (!MI->hasOneMemOperand() ||
- !(*MI->memoperands_begin())->getValue() ||
- (*MI->memoperands_begin())->isVolatile())
- return true;
-
- const Value *V = (*MI->memoperands_begin())->getValue();
- if (!V)
- return true;
-
- V = getUnderlyingObject(V);
- if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
- return PSV->mayAlias(MFI);
- return true;
-}
-
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
if (MachineLoop *ML = MLI.getLoopFor(BB))
if (BB == ML->getLoopLatch()) {
@@ -163,16 +147,15 @@
// We build scheduling units by walking a block's instruction list from bottom
// to top.
- // Remember where a generic side-effecting instruction is as we procede. If
- // ChainMMO is null, this is assumed to have arbitrary side-effects. If
- // ChainMMO is non-null, then Chain makes only a single memory reference.
- SUnit *Chain = 0;
- MachineMemOperand *ChainMMO = 0;
-
- // Memory references to specific known memory locations are tracked so that
- // they can be given more precise dependencies.
- std::map<const Value *, SUnit *> MemDefs;
- std::map<const Value *, std::vector<SUnit *> > MemUses;
+ // Remember where a generic side-effecting instruction is as we procede.
+ SUnit *BarrierChain = 0, *AliasChain = 0;
+
+ // Memory references to specific known memory locations are tracked
+ // so that they can be given more precise dependencies. We track
+ // separately the known memory locations that may alias and those
+ // that are known not to alias
+ std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
+ std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
// Check to see if the scheduler cares about latencies.
bool UnitLatencies = ForceUnitLatencies();
@@ -347,114 +330,132 @@
// produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
unsigned TrueMemOrderLatency = 0;
- if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
- new_chain:
- // This is the conservative case. Add dependencies on all memory
- // references.
- if (Chain)
- Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
- Chain = SU;
+ if (TID.isCall() || TID.hasUnmodeledSideEffects() ||
+ (MI->hasVolatileMemoryRef() &&
+ (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
+ // Be conservative with these and add dependencies on all memory
+ // references, even those that are known to not alias.
+ for (std::map<const Value *, SUnit *>::iterator I =
+ NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
+ I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ }
+ for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
+ NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+ I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
+ }
+ NonAliasMemDefs.clear();
+ NonAliasMemUses.clear();
+ // Add SU to the barrier chain.
+ if (BarrierChain)
+ BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ BarrierChain = SU;
+
+ // fall-through
+ new_alias_chain:
+ // Chain all possibly aliasing memory references though SU.
+ if (AliasChain)
+ AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ AliasChain = SU;
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
- PendingLoads.clear();
- for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
- E = MemDefs.end(); I != E; ++I) {
+ for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
+ E = AliasMemDefs.end(); I != E; ++I) {
I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
- I->second = SU;
}
for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
- MemUses.begin(), E = MemUses.end(); I != E; ++I) {
+ AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
- I->second.clear();
- I->second.push_back(SU);
}
- // See if it is known to just have a single memory reference.
- MachineInstr *ChainMI = Chain->getInstr();
- const TargetInstrDesc &ChainTID = ChainMI->getDesc();
- if (!ChainTID.isCall() &&
- !ChainTID.hasUnmodeledSideEffects() &&
- ChainMI->hasOneMemOperand() &&
- !(*ChainMI->memoperands_begin())->isVolatile() &&
- (*ChainMI->memoperands_begin())->getValue())
- // We know that the Chain accesses one specific memory location.
- ChainMMO = *ChainMI->memoperands_begin();
- else
- // Unknown memory accesses. Assume the worst.
- ChainMMO = 0;
+ PendingLoads.clear();
+ AliasMemDefs.clear();
+ AliasMemUses.clear();
} else if (TID.mayStore()) {
bool MayAlias = true;
TrueMemOrderLatency = STORE_LOAD_LATENCY;
if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
// A store to a specific PseudoSourceValue. Add precise dependencies.
- // Handle the def in MemDefs, if there is one.
- std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
- if (I != MemDefs.end()) {
+ // Record the def in MemDefs, first adding a dep if there is
+ // an existing def.
+ std::map<const Value *, SUnit *>::iterator I =
+ ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
+ std::map<const Value *, SUnit *>::iterator IE =
+ ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
+ if (I != IE) {
I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
/*isNormalMemory=*/true));
I->second = SU;
} else {
- MemDefs[V] = SU;
+ if (MayAlias)
+ AliasMemDefs[V] = SU;
+ else
+ NonAliasMemDefs[V] = SU;
}
// Handle the uses in MemUses, if there are any.
std::map<const Value *, std::vector<SUnit *> >::iterator J =
- MemUses.find(V);
- if (J != MemUses.end()) {
+ ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
+ std::map<const Value *, std::vector<SUnit *> >::iterator JE =
+ ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
+ if (J != JE) {
for (unsigned i = 0, e = J->second.size(); i != e; ++i)
J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
/*Reg=*/0, /*isNormalMemory=*/true));
J->second.clear();
}
if (MayAlias) {
- // Add dependencies from all the PendingLoads, since without
- // memoperands we must assume they alias anything.
+ // Add dependencies from all the PendingLoads, i.e. loads
+ // with no underlying object.
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
- // Add a general dependence too, if needed.
- if (Chain)
- Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ // Add dependence on alias chain, if needed.
+ if (AliasChain)
+ AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
}
+ // Add dependence on barrier chain, if needed.
+ if (BarrierChain)
+ BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
} else {
// Treat all other stores conservatively.
- goto new_chain;
+ goto new_alias_chain;
}
} else if (TID.mayLoad()) {
bool MayAlias = true;
TrueMemOrderLatency = 0;
if (MI->isInvariantLoad(AA)) {
// Invariant load, no chain dependencies needed!
- } else if (const Value *V =
- getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
- // A load from a specific PseudoSourceValue. Add precise dependencies.
- std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
- if (I != MemDefs.end())
- I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
- /*isNormalMemory=*/true));
- MemUses[V].push_back(SU);
-
- // Add a general dependence too, if needed.
- if (Chain && (!ChainMMO ||
- (ChainMMO->isStore() || ChainMMO->isVolatile())))
- Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
- } else if (MI->hasVolatileMemoryRef()) {
- // Treat volatile loads conservatively. Note that this includes
- // cases where memoperand information is unavailable.
- goto new_chain;
} else {
- // A "MayAlias" load. Depend on the general chain, as well as on
- // all stores. In the absense of MachineMemOperand information,
- // we can't even assume that the load doesn't alias well-behaved
- // memory locations.
- if (Chain)
- Chain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
- for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
- E = MemDefs.end(); I != E; ++I) {
- SUnit *DefSU = I->second;
- if (mayUnderlyingObjectForInstrAlias(DefSU->getInstr(), MFI))
- DefSU->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ if (const Value *V =
+ getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
+ // A load from a specific PseudoSourceValue. Add precise dependencies.
+ std::map<const Value *, SUnit *>::iterator I =
+ ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
+ std::map<const Value *, SUnit *>::iterator IE =
+ ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
+ if (I != IE)
+ I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
+ /*isNormalMemory=*/true));
+ if (MayAlias)
+ AliasMemUses[V].push_back(SU);
+ else
+ NonAliasMemUses[V].push_back(SU);
+ } else {
+ // A load with no underlying object. Depend on all
+ // potentially aliasing stores.
+ for (std::map<const Value *, SUnit *>::iterator I =
+ AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
+ I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+
+ PendingLoads.push_back(SU);
+ MayAlias = true;
}
- PendingLoads.push_back(SU);
- }
+
+ // Add dependencies on alias and barrier chains, if needed.
+ if (MayAlias && AliasChain)
+ AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ if (BarrierChain)
+ BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ }
}
}
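The scheduler rewrite above splits the old single Chain/MemDefs bookkeeping into a BarrierChain for instructions with arbitrary side effects, an AliasChain for possibly aliasing references, and separate alias/non-alias def and use maps, so provably distinct locations (such as disjoint PseudoSourceValues) only pick up dependencies from true barriers. Reduced to its core, the def-map selection in the store case looks roughly like this simplified sketch of the hunk above:

    // Pick the map by MayAlias, then either chain behind the previous def
    // of V or record this store as the new def.
    std::map<const Value *, SUnit *> &DefMap =
        MayAlias ? AliasMemDefs : NonAliasMemDefs;
    std::map<const Value *, SUnit *>::iterator I = DefMap.find(V);
    if (I != DefMap.end()) {
      I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                              /*isNormalMemory=*/true));
      I->second = SU;
    } else {
      DefMap[V] = SU;
    }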
Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.cpp?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.cpp Tue Nov 10 13:31:19 2009
@@ -16,6 +16,7 @@
#include "llvm/GlobalValue.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/ADT/SmallVector.h"
using namespace llvm;
static cl::opt<bool>
@@ -159,3 +160,13 @@
return false;
}
+
+bool ARMSubtarget::enablePostRAScheduler(
+ CodeGenOpt::Level OptLevel,
+ TargetSubtarget::AntiDepBreakMode& Mode,
+ ExcludedRCVector& ExcludedRCs) const {
+ Mode = TargetSubtarget::ANTIDEP_CRITICAL;
+ ExcludedRCs.clear();
+ ExcludedRCs.push_back(&ARM::GPRRegClass);
+ return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
+}
Modified: llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.h?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.h (original)
+++ llvm/branches/Apple/Leela/lib/Target/ARM/ARMSubtarget.h Tue Nov 10 13:31:19 2009
@@ -17,6 +17,7 @@
#include "llvm/Target/TargetInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtarget.h"
+#include "ARMBaseRegisterInfo.h"
#include <string>
namespace llvm {
@@ -126,13 +127,10 @@
const std::string & getCPUString() const { return CPUString; }
- /// enablePostRAScheduler - True at 'More' optimization except
- /// for Thumb1.
+ /// enablePostRAScheduler - True at 'More' optimization.
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtarget::AntiDepBreakMode& mode) const {
- mode = TargetSubtarget::ANTIDEP_CRITICAL;
- return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
- }
+ TargetSubtarget::AntiDepBreakMode& Mode,
+ ExcludedRCVector& ExcludedRCs) const;
/// getInstrItins - Return the instruction itineraies based on subtarget
/// selection.
Modified: llvm/branches/Apple/Leela/lib/Target/TargetSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/TargetSubtarget.cpp?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/TargetSubtarget.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/TargetSubtarget.cpp Tue Nov 10 13:31:19 2009
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/ADT/SmallVector.h"
using namespace llvm;
//---------------------------------------------------------------------------
@@ -20,3 +21,13 @@
TargetSubtarget::TargetSubtarget() {}
TargetSubtarget::~TargetSubtarget() {}
+
+bool TargetSubtarget::enablePostRAScheduler(
+ CodeGenOpt::Level OptLevel,
+ AntiDepBreakMode& Mode,
+ ExcludedRCVector& ExcludedRCs) const {
+ Mode = ANTIDEP_NONE;
+ ExcludedRCs.clear();
+ return false;
+}
+
Modified: llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.cpp?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.cpp (original)
+++ llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.cpp Tue Nov 10 13:31:19 2009
@@ -20,6 +20,7 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/ADT/SmallVector.h"
using namespace llvm;
#if defined(_MSC_VER)
@@ -455,3 +456,12 @@
if (StackAlignment)
stackAlignment = StackAlignment;
}
+
+bool X86Subtarget::enablePostRAScheduler(
+ CodeGenOpt::Level OptLevel,
+ TargetSubtarget::AntiDepBreakMode& Mode,
+ ExcludedRCVector& ExcludedRCs) const {
+ Mode = TargetSubtarget::ANTIDEP_CRITICAL;
+ ExcludedRCs.clear();
+ return OptLevel >= CodeGenOpt::Default;
+}
Modified: llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.h?rev=86703&r1=86702&r2=86703&view=diff
==============================================================================
--- llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.h (original)
+++ llvm/branches/Apple/Leela/lib/Target/X86/X86Subtarget.h Tue Nov 10 13:31:19 2009
@@ -219,10 +219,8 @@
/// enablePostRAScheduler - X86 target is enabling post-alloc scheduling
/// at 'More' optimization level.
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtarget::AntiDepBreakMode& mode) const {
- mode = TargetSubtarget::ANTIDEP_CRITICAL;
- return OptLevel >= CodeGenOpt::Default;
- }
+ TargetSubtarget::AntiDepBreakMode& Mode,
+ ExcludedRCVector& ExcludedRCs) const;
};
} // End llvm namespace