[llvm] 58f1417 - [AMDGPU] Order pos exports before param exports

Carl Ritson via llvm-commits llvm-commits at lists.llvm.org
Tue May 12 07:03:58 PDT 2020


Author: Carl Ritson
Date: 2020-05-12T23:02:23+09:00
New Revision: 58f1417ebc1a9714258586a5b873b2d4608ce6a9

URL: https://github.com/llvm/llvm-project/commit/58f1417ebc1a9714258586a5b873b2d4608ce6a9
DIFF: https://github.com/llvm/llvm-project/commit/58f1417ebc1a9714258586a5b873b2d4608ce6a9.diff

LOG: [AMDGPU] Order pos exports before param exports

Summary:
Modify export clustering DAG mutation to move position exports
before other export types.
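
For intuition (a sketch, not part of the patch): the reordering is a
stable partition by export target, where position exports (tgt 12-15
on AMDGPU) move to the front of the chain while each group keeps its
original relative order. A minimal standalone illustration, using a
hypothetical Export struct in place of real machine instructions:

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  // Hypothetical stand-in for an export instruction; only the target
  // index matters here. On AMDGPU, tgt 12-15 are pos0-pos3.
  struct Export { unsigned Tgt; };

  static bool isPositionExport(const Export &E) {
    return E.Tgt >= 12 && E.Tgt <= 15;
  }

  int main() {
    // param0 (32), param1 (33), pos0 (12), pos1 (13)
    std::vector<Export> Chain = {{32}, {33}, {12}, {13}};
    // Stable partition: pos exports first, relative order preserved
    // within both groups -- the same effect the patch's sortChain()
    // achieves by copying the chain and filling pos/other slots.
    std::stable_partition(Chain.begin(), Chain.end(), isPositionExport);
    for (const Export &E : Chain)
      std::printf("tgt %u\n", E.Tgt); // prints 12, 13, 32, 33
    return 0;
  }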

Reviewers: foad, arsenm, rampitec, nhaehnle

Reviewed By: foad

Subscribers: kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, hiraditya, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79670

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll
    llvm/test/CodeGen/AMDGPU/wait.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp
index 42ff12ddda2b..cbc248fbd9c8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUExportClustering.cpp
@@ -32,53 +32,87 @@ static bool isExport(const SUnit &SU) {
          MI->getOpcode() == AMDGPU::EXP_DONE;
 }
 
+static bool isPositionExport(const SIInstrInfo *TII, SUnit *SU) {
+  const MachineInstr *MI = SU->getInstr();
+  int Imm = TII->getNamedOperand(*MI, AMDGPU::OpName::tgt)->getImm();
+  return Imm >= 12 && Imm <= 15;
+}
+
+static void sortChain(const SIInstrInfo *TII, SmallVector<SUnit *, 8> &Chain,
+                      unsigned PosCount) {
+  if (!PosCount || PosCount == Chain.size())
+    return;
+
+  // Position exports should occur as soon as possible in the shader
+  // for optimal performance.  This moves position exports before
+  // other exports while preserving the order within different export
+  // types (pos or other).
+  SmallVector<SUnit *, 8> Copy(Chain);
+  unsigned PosIdx = 0;
+  unsigned OtherIdx = PosCount;
+  for (SUnit *SU : Copy) {
+    if (isPositionExport(TII, SU))
+      Chain[PosIdx++] = SU;
+    else
+      Chain[OtherIdx++] = SU;
+  }
+}
+
 static void buildCluster(ArrayRef<SUnit *> Exports, ScheduleDAGInstrs *DAG) {
-  // Cluster a series of exports. Also copy all dependencies to the first
-  // export to avoid computation being inserted into the chain.
-  SUnit *ChainHead = Exports[0];
+  SUnit *ChainHead = Exports.front();
+
+  // Now construct cluster from chain by adding new edges.
   for (unsigned Idx = 0, End = Exports.size() - 1; Idx < End; ++Idx) {
     SUnit *SUa = Exports[Idx];
     SUnit *SUb = Exports[Idx + 1];
-    if (DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
-      for (const SDep &Pred : SUb->Preds) {
-        SUnit *PredSU = Pred.getSUnit();
-        if (Pred.isWeak() || isExport(*PredSU))
-          continue;
+
+    // Copy all dependencies to the head of the chain to avoid any
+    // computation being inserted into the chain.
+    for (const SDep &Pred : SUb->Preds) {
+      SUnit *PredSU = Pred.getSUnit();
+      if (!isExport(*PredSU) && !Pred.isWeak())
         DAG->addEdge(ChainHead, SDep(PredSU, SDep::Artificial));
-      }
     }
+
+    // New barrier edge enforcing the ordering of exports
+    DAG->addEdge(SUb, SDep(SUa, SDep::Barrier));
+    // Also add cluster edge
+    DAG->addEdge(SUb, SDep(SUa, SDep::Cluster));
   }
 }
 
 void ExportClustering::apply(ScheduleDAGInstrs *DAG) {
-  SmallVector<SmallVector<SUnit *, 8>, 4> ExportChains;
-  DenseMap<unsigned, unsigned> ChainMap;
+  const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(DAG->TII);
+
+  SmallVector<SUnit *, 8> Chain;
 
-  // Build chains of exports
+  // Pass through DAG gathering a list of exports and removing barrier edges
+  // that create dependencies on exports. Freeing exports of successor edges
+  // allows more scheduling freedom, and nothing should be order dependent
+  // on exports.  Edges will be added later to order the exports.
+  unsigned PosCount = 0;
   for (SUnit &SU : DAG->SUnits) {
-    if (!isExport(SU))
-      continue;
+    if (isExport(SU)) {
+      Chain.push_back(&SU);
+      if (isPositionExport(TII, &SU))
+        PosCount++;
+    }
 
-    unsigned ChainID = ExportChains.size();
+    SmallVector<SDep, 2> ToRemove;
     for (const SDep &Pred : SU.Preds) {
-      const SUnit &PredSU = *Pred.getSUnit();
-      if (isExport(PredSU) && !Pred.isArtificial()) {
-        ChainID = ChainMap.lookup(PredSU.NodeNum);
-        break;
-      }
+      SUnit *PredSU = Pred.getSUnit();
+      if (Pred.isBarrier() && isExport(*PredSU))
+        ToRemove.push_back(Pred);
     }
-    ChainMap[SU.NodeNum] = ChainID;
-
-    if (ChainID == ExportChains.size())
-      ExportChains.push_back(SmallVector<SUnit *, 8>());
-
-    auto &Chain = ExportChains[ChainID];
-    Chain.push_back(&SU);
+    for (SDep Pred : ToRemove)
+      SU.removePred(Pred);
   }
 
-  // Apply clustering
-  for (auto &Chain : ExportChains)
+  // Apply clustering if there are multiple exports
+  if (Chain.size() > 1) {
+    sortChain(TII, Chain, PosCount);
     buildCluster(Chain, DAG);
+  }
 }
 
 } // end namespace
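
For reference, a sketch (not part of this change) of how a DAG
mutation like this is typically attached when a target constructs its
MI scheduler, assuming the factory createAMDGPUExportClusteringDAGMutation()
declared in AMDGPUExportClustering.h:

  #include "AMDGPUExportClustering.h"
  #include "llvm/CodeGen/MachineScheduler.h"
  #include <memory>

  using namespace llvm;

  // Sketch only: the real AMDGPU registration lives in the target
  // machine's scheduler construction, not in this file.
  static ScheduleDAGInstrs *
  createSchedulerWithExportClustering(MachineSchedContext *C) {
    auto *DAG =
        new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
    // Mutations run after the scheduling DAG is built, so apply() sees
    // the full set of SUnits and can reorder/cluster the exports.
    DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
    return DAG;
  }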

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll
index d6d80246a89b..a9e1f1859a2e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.exp.ll
@@ -3,6 +3,7 @@
 
 declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1
 declare void @llvm.amdgcn.exp.i32(i32, i32, i32, i32, i32, i32, i1, i1) #1
+declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32) #2
 
 ; GCN-LABEL: {{^}}test_export_zeroes_f32:
 ; GCN: exp mrt0 off, off, off, off{{$}}
@@ -557,5 +558,50 @@ define amdgpu_kernel void @test_export_clustering(float %x, float %y) #0 {
   ret void
 }
 
+; GCN-LABEL: {{^}}test_export_pos_before_param:
+; GCN: exp pos0
+; GCN-NOT: s_waitcnt
+; GCN: exp param0
+define amdgpu_kernel void @test_export_pos_before_param(float %x, float %y) #0 {
+  %z0 = fadd float %x, %y
+  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  %z1 = fsub float %y, %x
+  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 true, i1 false)
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_export_pos_before_param_ordered:
+; GCN: exp pos0
+; GCN: exp pos1
+; GCN: exp pos2
+; GCN-NOT: s_waitcnt
+; GCN: exp param0
+; GCN: exp param1
+; GCN: exp param2
+define amdgpu_kernel void @test_export_pos_before_param_ordered(float %x, float %y) #0 {
+  %z0 = fadd float %x, %y
+  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 33, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 34, i32 15, float 1.0, float 1.0, float 1.0, float %z0, i1 false, i1 false)
+  %z1 = fsub float %y, %x
+  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 13, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 14, i32 15, float 0.0, float 0.0, float 0.0, float %z1, i1 true, i1 false)
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_export_pos_before_param_across_load:
+; GCN: exp pos0
+; GCN-NEXT: exp param0
+; GCN-NEXT: exp param1
+define amdgpu_kernel void @test_export_pos_before_param_across_load(i32 %idx) #0 {
+  call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float 1.0, float 1.0, float 1.0, float 1.0, i1 false, i1 false)
+  call void @llvm.amdgcn.exp.f32(i32 33, i32 15, float 1.0, float 1.0, float 1.0, float 0.5, i1 false, i1 false)
+  %load = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0)
+  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float 0.0, float 0.0, float 0.0, float %load, i1 true, i1 false)
+  ret void
+}
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind inaccessiblememonly }
+attributes #2 = { nounwind readnone }

diff --git a/llvm/test/CodeGen/AMDGPU/wait.ll b/llvm/test/CodeGen/AMDGPU/wait.ll
index 8d6864f967e9..37c64f233594 100644
--- a/llvm/test/CodeGen/AMDGPU/wait.ll
+++ b/llvm/test/CodeGen/AMDGPU/wait.ll
@@ -10,8 +10,8 @@
 ; DEFAULT: s_waitcnt lgkmcnt(0)
 ; DEFAULT: buffer_load_format_xyzw
 ; DEFAULT: buffer_load_format_xyzw
-; DEFAULT: s_waitcnt vmcnt(0)
-; DEFAULT: exp
+; DEFAULT-DAG: s_waitcnt vmcnt(0)
+; DEFAULT-DAG: exp
 ; DEFAULT: exp
 ; DEFAULT-NEXT: s_endpgm
 define amdgpu_vs void @main(<16 x i8> addrspace(4)* inreg %arg, <16 x i8> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, <16 x i8> addrspace(4)* inreg %arg3, <16 x i8> addrspace(4)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(4)* inreg %constptr) #0 {

More information about the llvm-commits mailing list