[llvm-commits] [llvm] r78736 - in /llvm/trunk/lib: CodeGen/ExactHazardRecognizer.cpp Target/ARM/ARM.td Target/ARM/ARMSchedule.td Target/ARM/ARMScheduleV6.td Target/ARM/ARMScheduleV7.td
David Goodwin
david_goodwin at apple.com
Tue Aug 11 15:38:43 PDT 2009
Author: david_goodwin
Date: Tue Aug 11 17:38:43 2009
New Revision: 78736
URL: http://llvm.org/viewvc/llvm-project?rev=78736&view=rev
Log:
Allow a zero cycle stage to reserve/require a FU without advancing the cycle counter.
Modified:
llvm/trunk/lib/CodeGen/ExactHazardRecognizer.cpp
llvm/trunk/lib/Target/ARM/ARM.td
llvm/trunk/lib/Target/ARM/ARMSchedule.td
llvm/trunk/lib/Target/ARM/ARMScheduleV6.td
llvm/trunk/lib/Target/ARM/ARMScheduleV7.td
Modified: llvm/trunk/lib/CodeGen/ExactHazardRecognizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ExactHazardRecognizer.cpp?rev=78736&r1=78735&r2=78736&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/ExactHazardRecognizer.cpp (original)
+++ llvm/trunk/lib/CodeGen/ExactHazardRecognizer.cpp Tue Aug 11 17:38:43 2009
@@ -39,7 +39,7 @@
unsigned ItinDepth = 0;
for (; IS != E; ++IS)
- ItinDepth += IS->Cycles;
+ ItinDepth += std::max(1U, IS->Cycles);
ScoreboardDepth = std::max(ScoreboardDepth, ItinDepth);
}
@@ -89,9 +89,13 @@
unsigned idx = SU->getInstr()->getDesc().getSchedClass();
for (const InstrStage *IS = ItinData.begin(idx), *E = ItinData.end(idx);
IS != E; ++IS) {
+ // If the stage's cycles are 0, then we must have the FU free in
+ // the current cycle, but we don't advance the cycle time.
+ unsigned StageCycles = std::max(1U, IS->Cycles);
+
// We must find one of the stage's units free for every cycle the
// stage is occupied.
- for (unsigned int i = 0; i < IS->Cycles; ++i) {
+ for (unsigned int i = 0; i < StageCycles; ++i) {
assert((cycle < ScoreboardDepth) && "Scoreboard depth exceeded!");
unsigned index = getFutureIndex(cycle);
@@ -103,7 +107,8 @@
return Hazard;
}
- ++cycle;
+ if (IS->Cycles > 0)
+ ++cycle;
}
}
@@ -118,9 +123,13 @@
unsigned idx = SU->getInstr()->getDesc().getSchedClass();
for (const InstrStage *IS = ItinData.begin(idx), *E = ItinData.end(idx);
IS != E; ++IS) {
+ // If the stage's cycles are 0, then we must reserve the FU in the
+ // current cycle, but we don't advance the cycle time.
+ unsigned StageCycles = std::max(1U, IS->Cycles);
+
// We must reserve one of the stage's units for every cycle the
// stage is occupied.
- for (unsigned int i = 0; i < IS->Cycles; ++i) {
+ for (unsigned int i = 0; i < StageCycles; ++i) {
assert((cycle < ScoreboardDepth) && "Scoreboard depth exceeded!");
unsigned index = getFutureIndex(cycle);
@@ -135,7 +144,9 @@
assert(freeUnit && "No function unit available!");
Scoreboard[index] |= freeUnit;
- ++cycle;
+
+ if (IS->Cycles > 0)
+ ++cycle;
}
}
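For reference, here is a minimal, self-contained C++ sketch of the scoreboard walk after this change. It is not the actual ScheduleHazardRecognizer API (the names Stage, reserveStages, UnitsMask are hypothetical, and the real recognizer splits hazard checking and reservation into separate functions); it only illustrates the rule the patch introduces: a stage whose cycle count is 0 still has to find and reserve one of its functional units in the current cycle, but only stages with a non-zero cycle count advance the cycle counter.

#include <algorithm>
#include <cassert>
#include <vector>

struct Stage {
  unsigned Cycles;     // 0 means "occupy a unit now, but do not consume a cycle"
  unsigned UnitsMask;  // bitmask of functional units this stage may use
};

// Walk the stages, reserving one free unit per occupied cycle. Returns false
// on a hazard (no unit free). A zero-cycle stage is treated as occupying a
// unit for exactly one scoreboard slot -- the current cycle -- and does not
// advance the cycle counter.
bool reserveStages(const std::vector<Stage> &Stages,
                   std::vector<unsigned> &Scoreboard, unsigned StartCycle) {
  unsigned Cycle = StartCycle;
  for (const Stage &S : Stages) {
    unsigned Occupied = std::max(1u, S.Cycles);
    for (unsigned I = 0; I != Occupied; ++I) {
      assert(Cycle < Scoreboard.size() && "scoreboard depth exceeded");
      unsigned Free = S.UnitsMask & ~Scoreboard[Cycle];
      if (!Free)
        return false;                      // hazard: no unit available this cycle
      Scoreboard[Cycle] |= Free & -Free;   // reserve the lowest free unit
      if (S.Cycles > 0)                    // zero-cycle stages stay in this cycle
        ++Cycle;
    }
  }
  return true;
}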
Modified: llvm/trunk/lib/Target/ARM/ARM.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARM.td?rev=78736&r1=78735&r2=78736&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARM.td (original)
+++ llvm/trunk/lib/Target/ARM/ARM.td Tue Aug 11 17:38:43 2009
@@ -114,7 +114,7 @@
// V7 Processors.
def : Processor<"cortex-a8", CortexA8Itineraries,
[ArchV7A, FeatureThumb2, FeatureNEON, FeatureNEONFP]>;
-def : Processor<"cortex-a9", V7Itineraries,
+def : Processor<"cortex-a9", CortexA9Itineraries,
[ArchV7A, FeatureThumb2, FeatureNEON]>;
//===----------------------------------------------------------------------===//
Modified: llvm/trunk/lib/Target/ARM/ARMSchedule.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMSchedule.td?rev=78736&r1=78735&r2=78736&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMSchedule.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMSchedule.td Tue Aug 11 17:38:43 2009
@@ -10,8 +10,9 @@
//===----------------------------------------------------------------------===//
// Functional units across ARM processors
//
-def FU_Pipe0 : FuncUnit; // pipeline 0 issue
-def FU_Pipe1 : FuncUnit; // pipeline 1 issue
+def FU_Issue : FuncUnit; // issue
+def FU_Pipe0 : FuncUnit; // pipeline 0
+def FU_Pipe1 : FuncUnit; // pipeline 1
def FU_LdSt0 : FuncUnit; // pipeline 0 load/store
def FU_LdSt1 : FuncUnit; // pipeline 1 load/store
@@ -19,9 +20,11 @@
// Instruction Itinerary classes used for ARM
//
def IIC_iALU : InstrItinClass;
+def IIC_iMPY : InstrItinClass;
def IIC_iLoad : InstrItinClass;
def IIC_iStore : InstrItinClass;
def IIC_fpALU : InstrItinClass;
+def IIC_fpMPY : InstrItinClass;
def IIC_fpLoad : InstrItinClass;
def IIC_fpStore : InstrItinClass;
def IIC_Br : InstrItinClass;
@@ -31,12 +34,14 @@
def GenericItineraries : ProcessorItineraries<[
InstrItinData<IIC_iALU , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_iMPY , [InstrStage<1, [FU_Pipe0]>]>,
InstrItinData<IIC_iLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
- InstrItinData<IIC_fpLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
InstrItinData<IIC_iStore , [InstrStage<1, [FU_Pipe0]>]>,
- InstrItinData<IIC_fpStore , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0]>]>,
InstrItinData<IIC_fpALU , [InstrStage<1, [FU_Pipe0]>]>,
- InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0]>]>
+ InstrItinData<IIC_fpMPY , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_fpLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
+ InstrItinData<IIC_fpStore , [InstrStage<1, [FU_Pipe0]>]>
]>;
Modified: llvm/trunk/lib/Target/ARM/ARMScheduleV6.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMScheduleV6.td?rev=78736&r1=78735&r2=78736&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMScheduleV6.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMScheduleV6.td Tue Aug 11 17:38:43 2009
@@ -11,18 +11,16 @@
//
//===----------------------------------------------------------------------===//
+// TODO: this should model an ARM11
// Single issue pipeline so every itinerary starts with FU_pipe0
def V6Itineraries : ProcessorItineraries<[
- // single-cycle integer ALU
InstrItinData<IIC_iALU , [InstrStage<1, [FU_Pipe0]>]>,
- // loads have an extra cycle of latency, but are fully pipelined
+ InstrItinData<IIC_iMPY , [InstrStage<1, [FU_Pipe0]>]>,
InstrItinData<IIC_iLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
- InstrItinData<IIC_fpLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
- // fully-pipelined stores
InstrItinData<IIC_iStore , [InstrStage<1, [FU_Pipe0]>]>,
- InstrItinData<IIC_fpStore , [InstrStage<1, [FU_Pipe0]>]>,
- // fp ALU is not pipelined
- InstrItinData<IIC_fpALU , [InstrStage<6, [FU_Pipe0]>]>,
- // no delay slots, so the latency of a branch is unimportant
- InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0]>]>
+ InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_fpALU , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_fpMPY , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_fpLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
+ InstrItinData<IIC_fpStore , [InstrStage<1, [FU_Pipe0]>]>
]>;
Modified: llvm/trunk/lib/Target/ARM/ARMScheduleV7.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMScheduleV7.td?rev=78736&r1=78735&r2=78736&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMScheduleV7.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMScheduleV7.td Tue Aug 11 17:38:43 2009
@@ -11,34 +11,51 @@
//
//===----------------------------------------------------------------------===//
-// Single issue pipeline so every itinerary starts with FU_Pipe0
-def V7Itineraries : ProcessorItineraries<[
- // single-cycle integer ALU
- InstrItinData<IIC_iALU , [InstrStage<1, [FU_Pipe0]>]>,
- // loads have an extra cycle of latency, but are fully pipelined
- InstrItinData<IIC_iLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
- InstrItinData<IIC_fpLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
- // fully-pipelined stores
- InstrItinData<IIC_iStore , [InstrStage<1, [FU_Pipe0]>]>,
- InstrItinData<IIC_fpStore , [InstrStage<1, [FU_Pipe0]>]>,
- // fp ALU is not pipelined
- InstrItinData<IIC_fpALU , [InstrStage<6, [FU_Pipe0]>]>,
- // no delay slots, so the latency of a branch is unimportant
- InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0]>]>
-]>;
-
// Dual issue pipeline so every itinerary starts with FU_Pipe0 | FU_Pipe1
def CortexA8Itineraries : ProcessorItineraries<[
- // single-cycle integer ALU
+ // two fully-pipelined integer ALU pipelines
InstrItinData<IIC_iALU , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>]>,
+ // one fully-pipelined integer Multiply pipeline
+ // function units are used in alpha order, so use FU_Pipe1
+ // for the Multiply pipeline
+ InstrItinData<IIC_iMPY , [InstrStage<1, [FU_Pipe1]>]>,
// loads have an extra cycle of latency, but are fully pipelined
- InstrItinData<IIC_iLoad , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>, InstrStage<1, [FU_LdSt0]>]>,
- InstrItinData<IIC_fpLoad , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>, InstrStage<1, [FU_LdSt0]>]>,
+ // use a 0 cycle FU_Issue to enforce the 1 load/store per cycle limit
+ InstrItinData<IIC_iLoad , [InstrStage<0, [FU_Issue]>,
+ InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
+ InstrStage<1, [FU_LdSt0]>]>,
// fully-pipelined stores
- InstrItinData<IIC_iStore , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>]>,
- InstrItinData<IIC_fpStore , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>]>,
- // fp ALU is not pipelined
- InstrItinData<IIC_fpALU , [InstrStage<6, [FU_Pipe0, FU_Pipe1]>]>,
+ // use a 0 cycle FU_Issue to enforce the 1 load/store per cycle limit
+ InstrItinData<IIC_iStore , [InstrStage<0, [FU_Issue]>,
+ InstrStage<1, [FU_Pipe0, FU_Pipe1]>]>,
// no delay slots, so the latency of a branch is unimportant
- InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>]>
+ InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>]>,
+
+ // VFP ALU is not pipelined so stall all issues
+ // FIXME assume NFP pipeline and 7 cycle non-pipelined latency
+ InstrItinData<IIC_fpALU , [InstrStage<7, [FU_Pipe0, FU_Pipe1]>]>,
+ // VFP MPY is not pipelined so stall all issues
+ // FIXME assume NFP pipeline and 7 cycle non-pipelined latency
+ InstrItinData<IIC_fpMPY , [InstrStage<7, [FU_Pipe0, FU_Pipe1]>]>,
+ // loads have an extra cycle of latency, but are fully pipelined
+ // use a 0 cycle FU_Issue to enforce the 1 load/store per cycle limit
+ InstrItinData<IIC_fpLoad , [InstrStage<0, [FU_Issue]>,
+ InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
+ InstrStage<1, [FU_LdSt0]>]>,
+ // use a 0 cycle FU_Issue to enforce the 1 load/store per cycle limit
+ InstrItinData<IIC_fpStore , [InstrStage<0, [FU_Issue]>,
+ InstrStage<1, [FU_Pipe0, FU_Pipe1]>]>
+]>;
+
+// FIXME
+def CortexA9Itineraries : ProcessorItineraries<[
+ InstrItinData<IIC_iALU , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_iMPY , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_iLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
+ InstrItinData<IIC_iStore , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_fpALU , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_fpMPY , [InstrStage<1, [FU_Pipe0]>]>,
+ InstrItinData<IIC_fpLoad , [InstrStage<1, [FU_Pipe0]>, InstrStage<1, [FU_LdSt0]>]>,
+ InstrItinData<IIC_fpStore , [InstrStage<1, [FU_Pipe0]>]>
]>;
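To see why the 0-cycle FU_Issue stage caps loads and stores at one per cycle on the dual-issue Cortex-A8 model, here is a hypothetical, self-contained C++ toy (the unit names mirror the .td file, but the code is only an illustration, not LLVM's implementation): FU_Issue is a single unit, so a second load in the same cycle finds it already reserved and stalls, even though FU_Pipe0/FU_Pipe1 could otherwise accept it; and because the stage is 0 cycles, the following 1-cycle pipe stage still lands in the same cycle.

#include <cstdio>

int main() {
  // One scoreboard entry (cycle 0) as a bitmask of reserved functional units.
  enum : unsigned { FU_Issue = 1u << 0, FU_Pipe0 = 1u << 1, FU_Pipe1 = 1u << 2 };
  unsigned Cycle0 = 0;

  auto TryIssueLoad = [&Cycle0]() {
    // 0-cycle stage: FU_Issue must be free in the *current* cycle, and
    // reserving it does not advance time...
    if (Cycle0 & FU_Issue)
      return false;                 // a load/store already issued this cycle
    Cycle0 |= FU_Issue;
    // ...so the 1-cycle stage still takes one of the two pipes in cycle 0.
    Cycle0 |= (Cycle0 & FU_Pipe0) ? FU_Pipe1 : FU_Pipe0;
    return true;
  };

  std::printf("first load:  %s\n", TryIssueLoad() ? "issued" : "hazard");
  std::printf("second load: %s\n", TryIssueLoad() ? "issued" : "hazard");
  return 0;
}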