[llvm] r337154 - run post-RA hazard recognizer pass late

Mark Searles via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 16 03:02:42 PDT 2018


Author: msearles
Date: Mon Jul 16 03:02:41 2018
New Revision: 337154

URL: http://llvm.org/viewvc/llvm-project?rev=337154&view=rev
Log:
run post-RA hazard recognizer pass late

The memory legalizer, waitcnt, and shrink passes can perturb the
instructions, which means that the post-RA hazard recognizer pass should run
after them. Otherwise, one of those passes may invalidate the work done by
the hazard recognizer. Note that this has the adverse side effect that any
consecutive S_NOP 0's emitted by the hazard recognizer will not be shrunk
into a single S_NOP <N>. This should be addressed in a follow-on patch.
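
For reference, here is a condensed sketch of the resulting
GCNPassConfig::addPreEmitPass() ordering implied by the diff below. The
explanatory comments are mine, not part of the source, and surrounding passes
are assumed unchanged:

  void GCNPassConfig::addPreEmitPass() {
    // Passes that may add, remove, or rewrite instructions run first ...
    addPass(createSIMemoryLegalizerPass());
    addPass(createSIInsertWaitcntsPass());
    addPass(createSIShrinkInstructionsPass());

    // ... and only then does the stand-alone hazard recognizer run, so its
    // work cannot be invalidated. It currently emits individual "s_nop 0"
    // instructions rather than a single "s_nop <N>" (see the FIXME added
    // in the diff).
    addPass(&PostRAHazardRecognizerID);

    addPass(&SIInsertSkipsPassID);
    addPass(createSIDebuggerInsertNopsPass());
    addPass(&BranchRelaxationPassID);
  }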

Differential Revision: https://reviews.llvm.org/D49288

Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
    llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
    llvm/trunk/test/CodeGen/AMDGPU/memory_clause.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp?rev=337154&r1=337153&r2=337154&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp Mon Jul 16 03:02:41 2018
@@ -886,6 +886,10 @@ void GCNPassConfig::addPreSched2() {
 }
 
 void GCNPassConfig::addPreEmitPass() {
+  addPass(createSIMemoryLegalizerPass());
+  addPass(createSIInsertWaitcntsPass());
+  addPass(createSIShrinkInstructionsPass());
+
   // The hazard recognizer that runs as part of the post-ra scheduler does not
   // guarantee to be able handle all hazards correctly. This is because if there
   // are multiple scheduling regions in a basic block, the regions are scheduled
@@ -894,11 +898,11 @@ void GCNPassConfig::addPreEmitPass() {
   //
   // Here we add a stand-alone hazard recognizer pass which can handle all
   // cases.
+  //
+  // FIXME: This stand-alone pass will emit indiv. S_NOP 0, as needed. It would
+  // be better for it to emit S_NOP <N> when possible.
   addPass(&PostRAHazardRecognizerID);
 
-  addPass(createSIMemoryLegalizerPass());
-  addPass(createSIInsertWaitcntsPass());
-  addPass(createSIShrinkInstructionsPass());
   addPass(&SIInsertSkipsPassID);
   addPass(createSIDebuggerInsertNopsPass());
   addPass(&BranchRelaxationPassID);

Modified: llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp?rev=337154&r1=337153&r2=337154&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp Mon Jul 16 03:02:41 2018
@@ -737,7 +737,7 @@ bool SIGfx6CacheControl::insertWait(Mach
     case SIAtomicScope::WAVEFRONT:
     case SIAtomicScope::SINGLETHREAD:
       // The L1 cache keeps all memory operations in order for
-      // wavesfronts in the same work-group.
+      // wavefronts in the same work-group.
       break;
     default:
       llvm_unreachable("Unsupported synchronization scope");

Modified: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll?rev=337154&r1=337153&r2=337154&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.mov.dpp.ll Mon Jul 16 03:02:41 2018
@@ -6,7 +6,9 @@
 ; VI-LABEL: {{^}}dpp_test:
 ; VI: v_mov_b32_e32 v0, s{{[0-9]+}}
 ; VI-NOOPT: v_mov_b32_e32 v1, s{{[0-9]+}}
-; VI: s_nop 1
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI-OPT: v_mov_b32_dpp v0, v0 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x00,0x01,0x08,0x11]
 ; VI-NOOPT: v_mov_b32_dpp v0, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x01,0x01,0x08,0x11]
 define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in) {
@@ -18,10 +20,14 @@ define amdgpu_kernel void @dpp_test(i32
 ; VI-LABEL: {{^}}dpp_wait_states:
 ; VI-NOOPT: v_mov_b32_e32 [[VGPR1:v[0-9]+]], s{{[0-9]+}}
 ; VI: v_mov_b32_e32 [[VGPR0:v[0-9]+]], s{{[0-9]+}}
-; VI: s_nop 1
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI-OPT: v_mov_b32_dpp [[VGPR0]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
-; VI-NOOPT: v_mov_b32_dpp [[VGPR1]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
-; VI: s_nop 1
+; VI-NOOPT: v_mov_b32_dpp [[VGPR1]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI-OPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
 ; VI-NOOPT: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
 define amdgpu_kernel void @dpp_wait_states(i32 addrspace(1)* %out, i32 %in) {
@@ -35,11 +41,14 @@ define amdgpu_kernel void @dpp_wait_stat
 ; VI: ; %endif
 ; VI-OPT: s_mov_b32
 ; VI-OPT: s_mov_b32
-; VI-NOOPT: s_nop 1
+; VI-NOOPT: s_waitcnt
+; VI-NOOPT-NEXT: s_nop 0
 ; VI: v_mov_b32_dpp [[VGPR0:v[0-9]+]], v{{[0-9]+}} quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
-; VI: s_nop 1
+; VI-OPT: s_nop 1
 ; VI: v_mov_b32_dpp [[VGPR1:v[0-9]+]], [[VGPR0]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
-; VI: s_nop 1
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI: v_mov_b32_dpp v{{[0-9]+}}, [[VGPR1]] quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0
 define amdgpu_kernel void @dpp_first_in_bb(float addrspace(1)* %out, float addrspace(1)* %in, float %cond, float %a, float %b) {
   %cmp = fcmp oeq float %cond, 0.0

Modified: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll?rev=337154&r1=337153&r2=337154&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll Mon Jul 16 03:02:41 2018
@@ -4,7 +4,9 @@
 ; VI-LABEL: {{^}}dpp_test:
 ; VI: v_mov_b32_e32 v0, s{{[0-9]+}}
 ; VI: v_mov_b32_e32 v1, s{{[0-9]+}}
-; VI: s_nop 1
+; VI-OPT: s_nop 1
+; VI-NOOPT: s_nop 0
+; VI-NOOPT: s_nop 0
 ; VI: v_mov_b32_dpp v0, v1 quad_perm:[1,0,0,0] row_mask:0x1 bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x02,0x00,0x7e,0x01,0x01,0x08,0x11]
 define amdgpu_kernel void @dpp_test(i32 addrspace(1)* %out, i32 %in1, i32 %in2) {
   %tmp0 = call i32 @llvm.amdgcn.update.dpp.i32(i32 %in1, i32 %in2, i32 1, i32 1, i32 1, i1 1) #0
@@ -12,6 +14,33 @@ define amdgpu_kernel void @dpp_test(i32
   ret void
 }
 
+; VI-LABEL: {{^}}dpp_test1:
+; VI-OPT: v_add_u32_e32 [[REG:v[0-9]+]], vcc, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-NOOPT: v_mov_b32_e32 v{{[0-9]+}}, 0
+; VI-NOOPT: v_mov_b32_e32 [[REG:v[0-9]+]], v{{[0-9]+}}
+; VI-NEXT: s_nop 0
+; VI-NEXT: s_nop 0
+; VI-NEXT: v_mov_b32_dpp v2, [[REG]] quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
+@0 = internal unnamed_addr addrspace(3) global [448 x i32] undef, align 4
+define weak_odr amdgpu_kernel void @dpp_test1(i32* %arg) local_unnamed_addr {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp1 = zext i32 %tmp to i64
+  %tmp2 = getelementptr inbounds [448 x i32], [448 x i32] addrspace(3)* @0, i32 0, i32 %tmp
+  %tmp3 = load i32, i32 addrspace(3)* %tmp2, align 4
+  fence syncscope("workgroup") release
+  tail call void @llvm.amdgcn.s.barrier()
+  fence syncscope("workgroup") acquire
+  %tmp4 = add nsw i32 %tmp3, %tmp3
+  %tmp5 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp4, i32 177, i32 15, i32 15, i1 zeroext false)
+  %tmp6 = add nsw i32 %tmp5, %tmp4
+  %tmp7 = getelementptr inbounds i32, i32* %arg, i64 %tmp1
+  store i32 %tmp6, i32* %tmp7, align 4
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare void @llvm.amdgcn.s.barrier()
 declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32, i32, i32, i1) #0
 
 attributes #0 = { nounwind readnone convergent }

Modified: llvm/trunk/test/CodeGen/AMDGPU/memory_clause.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/memory_clause.ll?rev=337154&r1=337153&r2=337154&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/memory_clause.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/memory_clause.ll Mon Jul 16 03:02:41 2018
@@ -77,6 +77,7 @@ bb:
 ; GCN-NEXT: buffer_load_dword
 ; GCN-NEXT: buffer_load_dword
 ; GCN-NEXT: s_nop
+; GCN-NEXT: s_nop
 ; GCN-NEXT: buffer_load_dword
 define void @mubuf_clause(<4 x i32> addrspace(5)* noalias nocapture readonly %arg, <4 x i32> addrspace(5)* noalias nocapture %arg1) {
 bb:
@@ -105,8 +106,9 @@ bb:
 
 ; GCN-LABEL: {{^}}vector_clause_indirect:
 ; GCN: global_load_dwordx2 [[ADDR:v\[[0-9:]+\]]], v[{{[0-9:]+}}], off
-; GCN-NEXT: s_nop
+; GCN-NEXT: s_nop 0
 ; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: s_nop 0
 ; GCN-NEXT: global_load_dwordx4 v[{{[0-9:]+}}], [[ADDR]], off
 ; GCN-NEXT: global_load_dwordx4 v[{{[0-9:]+}}], [[ADDR]], off offset:16
 define amdgpu_kernel void @vector_clause_indirect(i64 addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture readnone %arg1, <4 x i32> addrspace(1)* noalias nocapture %arg2) {
@@ -128,6 +130,7 @@ bb:
 ; GCN-LABEL: {{^}}load_global_d16_hi:
 ; GCN:      global_load_short_d16_hi v
 ; GCN-NEXT: s_nop
+; GCN-NEXT: s_nop
 ; GCN-NEXT: global_load_short_d16_hi v
 define void @load_global_d16_hi(i16 addrspace(1)* %in, i16 %reg, <2 x i16> addrspace(1)* %out) {
 entry:
@@ -147,6 +150,7 @@ entry:
 ; GCN-LABEL: {{^}}load_global_d16_lo:
 ; GCN:      global_load_short_d16 v
 ; GCN-NEXT: s_nop
+; GCN-NEXT: s_nop
 ; GCN-NEXT: global_load_short_d16 v
 define void @load_global_d16_lo(i16 addrspace(1)* %in, i32 %reg, <2 x i16> addrspace(1)* %out) {
 entry:



