[llvm] r268260 - AMDGPU/SI: Use the hazard recognizer to break SMEM soft clauses
Tom Stellard via llvm-commits
llvm-commits at lists.llvm.org
Mon May 2 10:39:07 PDT 2016
Author: tstellar
Date: Mon May 2 12:39:06 2016
New Revision: 268260
URL: http://llvm.org/viewvc/llvm-project?rev=268260&view=rev
Log:
AMDGPU/SI: Use the hazard recognizer to break SMEM soft clauses
Summary:
Add support for detecting hazards in SMEM soft clauses, so that we only
break a clause when necessary, either by inserting s_nop or by re-ordering
other ALU instructions.
Reviewers: arsenm
Subscribers: arsenm, llvm-commits
Differential Revision: http://reviews.llvm.org/D18870
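
For illustration only (this sketch is not part of the patch): the core of the
new check is that a clause of back-to-back SMEM instructions is only safe when
no instruction in the clause writes a register that any instruction in the
clause reads. A minimal standalone C++ sketch of that overlap test, using
made-up register numbers, could look like this:

  #include <algorithm>
  #include <cassert>
  #include <set>
  #include <vector>

  // Returns true if some register is both written and read within the clause,
  // i.e. the def set and the use set intersect.
  static bool clauseHasHazard(const std::set<unsigned> &ClauseDefs,
                              const std::set<unsigned> &ClauseUses) {
    std::vector<unsigned> Overlap(
        std::min(ClauseDefs.size(), ClauseUses.size()));
    auto End = std::set_intersection(ClauseDefs.begin(), ClauseDefs.end(),
                                     ClauseUses.begin(), ClauseUses.end(),
                                     Overlap.begin());
    return End != Overlap.begin();
  }

  int main() {
    // Hypothetical clause: the second load reads registers written by the
    // first, so the clause must be broken (by an s_nop or a re-ordered ALU
    // instruction).
    assert(clauseHasHazard(/*Defs=*/{0, 1}, /*Uses=*/{0, 1, 4, 5}));
    // Independent loads: defs and uses are disjoint, so the clause can stand.
    assert(!clauseHasHazard(/*Defs=*/{2, 3}, /*Uses=*/{4, 5}));
    return 0;
  }

In the patch itself, the same test runs over registers gathered from the
previously emitted SMEM instructions; see checkSMEMSoftClauseHazards below.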
Modified:
llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h
llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp
llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
Modified: llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp?rev=268260&r1=268259&r2=268260&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp Mon May 2 12:39:06 2016
@@ -132,18 +132,86 @@ int GCNHazardRecognizer::getWaitStatesSi
// No-op Hazard Detection
//===----------------------------------------------------------------------===//
+static void addRegsToSet(iterator_range<MachineInstr::const_mop_iterator> Ops,
+ std::set<unsigned> &Set) {
+ for (const MachineOperand &Op : Ops) {
+ if (Op.isReg())
+ Set.insert(Op.getReg());
+ }
+}
+
+int GCNHazardRecognizer::checkSMEMSoftClauseHazards(MachineInstr *SMEM) {
+ const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
+
+ // SMEM soft clauses are only present on VI+
+ if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
+ return 0;
+
+ // A soft-clause is any group of consecutive SMEM instructions. The
+ // instructions in this group may return out of order and/or may be
+ // replayed (i.e. the same instruction issued more than once).
+ //
+ // In order to handle these situations correctly we need to make sure
+ // that when a clause has more than one instruction, no instruction in the
+ // clause writes to a register that is read by another instruction in the
+ // clause (including itself). If we encounter this situation, we need to
+ // break the clause by inserting a non-SMEM instruction.
+
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
+ std::set<unsigned> ClauseDefs;
+ std::set<unsigned> ClauseUses;
+
+ for (MachineInstr *MI : EmittedInstrs) {
+
+ // When we hit a non-SMEM instruction, we have passed the start of the
+ // clause and we can stop.
+ if (!MI || !TII->isSMRD(*MI))
+ break;
+
+ addRegsToSet(MI->defs(), ClauseDefs);
+ addRegsToSet(MI->uses(), ClauseUses);
+ }
+
+ if (ClauseDefs.empty())
+ return 0;
+
+ // FIXME: When we support stores, we need to make sure not to put loads and
+ // stores in the same clause if they use the same address. For now, just
+ // start a new clause whenever we see a store.
+ if (SMEM->mayStore())
+ return 1;
+
+ addRegsToSet(SMEM->defs(), ClauseDefs);
+ addRegsToSet(SMEM->uses(), ClauseUses);
+
+ std::vector<unsigned> Result(std::max(ClauseDefs.size(), ClauseUses.size()));
+ std::vector<unsigned>::iterator End;
+
+ End = std::set_intersection(ClauseDefs.begin(), ClauseDefs.end(),
+ ClauseUses.begin(), ClauseUses.end(), Result.begin());
+
+ // If the sets of defs and uses intersect, then we cannot add this
+ // instruction to the clause, so we have a hazard.
+ if (End != Result.begin())
+ return 1;
+
+ return 0;
+}
+
int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
+ int WaitStatesNeeded = 0;
+
+ WaitStatesNeeded = checkSMEMSoftClauseHazards(SMRD);
// This SMRD hazard only affects SI.
if (ST.getGeneration() != AMDGPUSubtarget::SOUTHERN_ISLANDS)
- return 0;
+ return WaitStatesNeeded;
// A read of an SGPR by SMRD instruction requires 4 wait states when the
// SGPR was written by a VALU instruction.
int SmrdSgprWaitStates = 4;
- int WaitStatesNeeded = 0;
auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
for (const MachineOperand &Use : SMRD->uses()) {
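
A side note on the std::set_intersection check in checkSMEMSoftClauseHazards
above: the result buffer only has to be large enough for the intersection,
which can never exceed the smaller of the two sets, and the routine only needs
to know whether that intersection is empty. An equivalent, allocation-free
formulation (an editorial sketch, not something this patch does) would be:

  #include <set>

  // Emptiness test for the intersection of two register sets, without
  // materializing the intersection itself.
  static bool setsIntersect(const std::set<unsigned> &A,
                            const std::set<unsigned> &B) {
    const std::set<unsigned> &Small = A.size() <= B.size() ? A : B;
    const std::set<unsigned> &Large = A.size() <= B.size() ? B : A;
    for (unsigned Reg : Small)
      if (Large.count(Reg))
        return true;
    return false;
  }

Either formulation gives the same yes/no answer, which is all
checkSMEMSoftClauseHazards needs in order to report the single wait state that
breaks the clause.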
Modified: llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h?rev=268260&r1=268259&r2=268260&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h Mon May 2 12:39:06 2016
@@ -38,6 +38,7 @@ class GCNHazardRecognizer final : public
std::function<bool(MachineInstr*)> IsHazardDef =
[](MachineInstr*) {return true;});
+ int checkSMEMSoftClauseHazards(MachineInstr *SMEM);
int checkSMRDHazards(MachineInstr *SMRD);
int checkVMEMHazards(MachineInstr* VMEM);
int checkDPPHazards(MachineInstr *DPP);
Modified: llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp?rev=268260&r1=268259&r2=268260&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp Mon May 2 12:39:06 2016
@@ -314,8 +314,7 @@ void SIInsertWaits::pushInstruction(Mach
// and destination registers don't overlap, e.g. this is illegal:
// r0 = load r2
// r2 = load r0
- if ((LastOpcodeType == SMEM && TII->isSMRD(*I)) ||
- (LastOpcodeType == VMEM && Increment.Named.VM)) {
+ if (LastOpcodeType == VMEM && Increment.Named.VM) {
// Insert a NOP to break the clause.
BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP))
.addImm(0);
Modified: llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll?rev=268260&r1=268259&r2=268260&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll Mon May 2 12:39:06 2016
@@ -23,8 +23,8 @@ define void @lds_atomic_cmpxchg_ret_i32_
; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_ret_i64_offset:
; SICI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SICI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
-; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
-; VI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
+; VI-DAG: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-DAG: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
; GCN-DAG: v_mov_b32_e32 v[[LOVCMP:[0-9]+]], 7
; GCN-DAG: v_mov_b32_e32 v[[HIVCMP:[0-9]+]], 0
; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
@@ -75,8 +75,8 @@ define void @lds_atomic_cmpxchg_noret_i3
; FUNC-LABEL: {{^}}lds_atomic_cmpxchg_noret_i64_offset:
; SICI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
; SICI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; VI: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x24
-; VI: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-DAG: s_load_dword [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x24
+; VI-DAG: s_load_dwordx2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
; GCN-DAG: v_mov_b32_e32 v[[LOVCMP:[0-9]+]], 7
; GCN-DAG: v_mov_b32_e32 v[[HIVCMP:[0-9]+]], 0
; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
Modified: llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll?rev=268260&r1=268259&r2=268260&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll Mon May 2 12:39:06 2016
@@ -1,11 +1,11 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
; FIXME: Check something here. Currently it seems fabs + fneg aren't
; into 2 modifiers, although theoretically that should work.
-; FUNC-LABEL: {{^}}fneg_fabs_fadd_f64:
-; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|v{{\[[0-9]+:[0-9]+\]}}|
+; GCN-LABEL: {{^}}fneg_fabs_fadd_f64:
+; GCN: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|v{{\[[0-9]+:[0-9]+\]}}|
define void @fneg_fabs_fadd_f64(double addrspace(1)* %out, double %x, double %y) {
%fabs = call double @llvm.fabs.f64(double %x)
%fsub = fsub double -0.000000e+00, %fabs
@@ -24,8 +24,8 @@ define void @v_fneg_fabs_fadd_f64(double
ret void
}
-; FUNC-LABEL: {{^}}fneg_fabs_fmul_f64:
-; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|{{v\[[0-9]+:[0-9]+\]}}|
+; GCN-LABEL: {{^}}fneg_fabs_fmul_f64:
+; GCN: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|{{v\[[0-9]+:[0-9]+\]}}|
define void @fneg_fabs_fmul_f64(double addrspace(1)* %out, double %x, double %y) {
%fabs = call double @llvm.fabs.f64(double %x)
%fsub = fsub double -0.000000e+00, %fabs
@@ -34,7 +34,7 @@ define void @fneg_fabs_fmul_f64(double a
ret void
}
-; FUNC-LABEL: {{^}}fneg_fabs_free_f64:
+; GCN-LABEL: {{^}}fneg_fabs_free_f64:
define void @fneg_fabs_free_f64(double addrspace(1)* %out, i64 %in) {
%bc = bitcast i64 %in to double
%fabs = call double @llvm.fabs.f64(double %bc)
@@ -43,9 +43,9 @@ define void @fneg_fabs_free_f64(double a
ret void
}
-; FUNC-LABEL: {{^}}fneg_fabs_fn_free_f64:
-; SI: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN-LABEL: {{^}}fneg_fabs_fn_free_f64:
+; GCN: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
define void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
%bc = bitcast i64 %in to double
%fabs = call double @fabs(double %bc)
@@ -54,13 +54,14 @@ define void @fneg_fabs_fn_free_f64(doubl
ret void
}
-; FUNC-LABEL: {{^}}fneg_fabs_f64:
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}
-; SI: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
-; SI-DAG: v_or_b32_e32 v[[HI_V:[0-9]+]], s[[HI_X]], [[IMMREG]]
-; SI-DAG: v_mov_b32_e32 v[[LO_V:[0-9]+]], s[[LO_X]]
-; SI: buffer_store_dwordx2 v{{\[}}[[LO_V]]:[[HI_V]]{{\]}}
+; GCN-LABEL: {{^}}fneg_fabs_f64:
+; GCN: s_load_dwordx2
+; GCN-DAG: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
+; SI-DAG: s_load_dwordx2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0xb
+; VI-DAG: s_load_dwordx2 s{{\[}}[[LO_X:[0-9]+]]:[[HI_X:[0-9]+]]{{\]}}, s[{{[0-9]+:[0-9]+}}], 0x2c
+; GCN-DAG: v_or_b32_e32 v[[HI_V:[0-9]+]], s[[HI_X]], [[IMMREG]]
+; GCN-DAG: v_mov_b32_e32 v[[LO_V:[0-9]+]], s[[LO_X]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[LO_V]]:[[HI_V]]{{\]}}
define void @fneg_fabs_f64(double addrspace(1)* %out, double %in) {
%fabs = call double @llvm.fabs.f64(double %in)
%fsub = fsub double -0.000000e+00, %fabs
@@ -68,11 +69,11 @@ define void @fneg_fabs_f64(double addrsp
ret void
}
-; FUNC-LABEL: {{^}}fneg_fabs_v2f64:
-; SI: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
-; SI-NOT: 0x80000000
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN-LABEL: {{^}}fneg_fabs_v2f64:
+; GCN: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
+; GCN-NOT: 0x80000000
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
define void @fneg_fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
%fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
%fsub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %fabs
@@ -80,13 +81,13 @@ define void @fneg_fabs_v2f64(<2 x double
ret void
}
-; FUNC-LABEL: {{^}}fneg_fabs_v4f64:
-; SI: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
-; SI-NOT: 0x80000000
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
-; SI: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN-LABEL: {{^}}fneg_fabs_v4f64:
+; GCN: v_bfrev_b32_e32 [[IMMREG:v[0-9]+]], 1{{$}}
+; GCN-NOT: 0x80000000
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
+; GCN: v_or_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, [[IMMREG]]
define void @fneg_fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
%fabs = call <4 x double> @llvm.fabs.v4f64(<4 x double> %in)
%fsub = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %fabs
Modified: llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll?rev=268260&r1=268259&r2=268260&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll Mon May 2 12:39:06 2016
@@ -2,18 +2,15 @@
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=VI --check-prefix=GCN %s
; FUNC-LABEL: {{^}}cluster_arg_loads:
-; FIXME: Due to changes in the load clustering heuristics. We now longer
-; cluster all argument loads together.
+; FIXME: Due to changes in the load clustering heuristics, we no longer
+; cluster all argument loads together on SI.
; SI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe
; SI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x9
; SI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
; VI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
-; VI-NEXT: s_nop 0
; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
-; VI-NEXT: s_nop 0
-; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
-; VI-NEXT: s_nop 0
+; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind {
store i32 %x, i32 addrspace(1)* %out0, align 4