[llvm] [AMDGPU] Schedule independent instruction between s_barrier_signal and s_barrier_wait (PR #172057)
Dark Steve via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 12 10:07:41 PST 2025
https://github.com/PrasoonMishra updated https://github.com/llvm/llvm-project/pull/172057
>From a4861bbf59820887882d050ed65762f0a2596211 Mon Sep 17 00:00:00 2001
From: Prasoon Mishra <Prasoon.Mishra at amd.com>
Date: Fri, 12 Dec 2025 13:20:01 +0000
Subject: [PATCH 1/2] [AMDGPU] Add synthetic latency between s_barrier_signal
and s_barrier_wait
Add synthetic latency to the barrier edge from S_BARRIER_SIGNAL to
S_BARRIER_WAIT to encourage latency hiding: the scheduler can then place
independent instructions in the gap between the split barrier's signal
and wait.
The latency is tunable via -amdgpu-barrier-signal-wait-latency.
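For reference, the flag can be exercised from the command line as in the
RUN lines of the added test (input.ll below is only a placeholder name):

  # default synthetic latency of 16 on the signal -> wait edge
  llc -mtriple=amdgcn -mcpu=gfx1200 input.ll

  # value 0 adds no extra latency, matching the NOOPT checks in the test
  llc -mtriple=amdgcn -mcpu=gfx1200 -amdgpu-barrier-signal-wait-latency=0 input.ll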
---
.../Target/AMDGPU/AMDGPUBarrierLatency.cpp | 96 ++++++---
.../AMDGPU/barrier-signal-wait-latency.ll | 197 ++++++++++++++++++
.../CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll | 42 ++--
llvm/test/CodeGen/AMDGPU/s-barrier.ll | 2 +-
4 files changed, 282 insertions(+), 55 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/barrier-signal-wait-latency.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp b/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
index 2e586ea207af5..39f6f409c8ebe 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
@@ -6,14 +6,17 @@
//
//===----------------------------------------------------------------------===//
//
-/// \file This file contains a DAG scheduling mutation to add latency to
-/// barrier edges between ATOMIC_FENCE instructions and preceding
-/// memory accesses potentially affected by the fence.
-/// This encourages the scheduling of more instructions before
-/// ATOMIC_FENCE instructions. ATOMIC_FENCE instructions may
-/// introduce wait counting or indicate an impending S_BARRIER
-/// wait. Having more instructions in-flight across these
-/// constructs improves latency hiding.
+/// \file This file contains a DAG scheduling mutation to add latency to:
+/// 1. Barrier edges between ATOMIC_FENCE instructions and preceding
+/// memory accesses potentially affected by the fence.
+/// This encourages the scheduling of more instructions before
+/// ATOMIC_FENCE instructions. ATOMIC_FENCE instructions may
+/// introduce wait counting or indicate an impending S_BARRIER
+/// wait. Having more instructions in-flight across these
+/// constructs improves latency hiding.
+/// 2. Barrier edges from S_BARRIER_SIGNAL to S_BARRIER_WAIT.
+/// This encourages independent work to be scheduled between
+/// signal and wait, hiding barrier synchronization latency.
//
//===----------------------------------------------------------------------===//
@@ -21,9 +24,16 @@
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
+static cl::opt<unsigned> BarrierSignalWaitLatencyOpt(
+ "amdgpu-barrier-signal-wait-latency",
+ cl::desc("Synthetic latency between S_BARRIER_SIGNAL and S_BARRIER_WAIT "
+ "to encourage scheduling independent work between them"),
+ cl::init(16), cl::Hidden);
+
namespace {
class BarrierLatency : public ScheduleDAGMutation {
@@ -41,38 +51,58 @@ class BarrierLatency : public ScheduleDAGMutation {
void apply(ScheduleDAGInstrs *DAG) override;
};
+void addLatencyToEdge(SDep &PredDep, SUnit &SU, unsigned Latency) {
+ SUnit *PredSU = PredDep.getSUnit();
+ SDep ForwardD = PredDep;
+ ForwardD.setSUnit(&SU);
+ for (SDep &SuccDep : PredSU->Succs) {
+ if (SuccDep == ForwardD) {
+ SuccDep.setLatency(SuccDep.getLatency() + Latency);
+ break;
+ }
+ }
+ PredDep.setLatency(PredDep.getLatency() + Latency);
+ PredSU->setDepthDirty();
+ SU.setDepthDirty();
+}
+
void BarrierLatency::apply(ScheduleDAGInstrs *DAG) {
- constexpr unsigned SyntheticLatency = 2000;
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(DAG->TII);
+ constexpr unsigned FenceLatency = 2000;
+ const unsigned BarrierSignalWaitLatency = BarrierSignalWaitLatencyOpt;
+
for (SUnit &SU : DAG->SUnits) {
const MachineInstr *MI = SU.getInstr();
- if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
- continue;
+ unsigned Op = MI->getOpcode();
- // Update latency on barrier edges of ATOMIC_FENCE.
- // Ignore scopes not expected to have any latency.
- SyncScope::ID SSID = static_cast<SyncScope::ID>(MI->getOperand(1).getImm());
- if (IgnoredScopes.contains(SSID))
- continue;
-
- for (SDep &PredDep : SU.Preds) {
- if (!PredDep.isBarrier())
- continue;
- SUnit *PredSU = PredDep.getSUnit();
- MachineInstr *MI = PredSU->getInstr();
- // Only consider memory loads
- if (!MI->mayLoad() || MI->mayStore())
+ if (Op == AMDGPU::ATOMIC_FENCE) {
+ // Update latency on barrier edges of ATOMIC_FENCE.
+ // Ignore scopes not expected to have any latency.
+ SyncScope::ID SSID =
+ static_cast<SyncScope::ID>(MI->getOperand(1).getImm());
+ if (IgnoredScopes.contains(SSID))
continue;
- SDep ForwardD = PredDep;
- ForwardD.setSUnit(&SU);
- for (SDep &SuccDep : PredSU->Succs) {
- if (SuccDep == ForwardD) {
- SuccDep.setLatency(SuccDep.getLatency() + SyntheticLatency);
- break;
+
+ for (SDep &PredDep : SU.Preds) {
+ if (!PredDep.isBarrier())
+ continue;
+ SUnit *PredSU = PredDep.getSUnit();
+ MachineInstr *MI = PredSU->getInstr();
+ // Only consider memory loads
+ if (!MI->mayLoad() || MI->mayStore())
+ continue;
+ addLatencyToEdge(PredDep, SU, FenceLatency);
+ }
+ }
+
+ else if (Op == AMDGPU::S_BARRIER_WAIT) {
+ for (SDep &PredDep : SU.Preds) {
+ SUnit *PredSU = PredDep.getSUnit();
+ const MachineInstr *PredMI = PredSU->getInstr();
+ if (TII->isBarrierStart(PredMI->getOpcode())) {
+ addLatencyToEdge(PredDep, SU, BarrierSignalWaitLatency);
}
}
- PredDep.setLatency(PredDep.getLatency() + SyntheticLatency);
- PredSU->setDepthDirty();
- SU.setDepthDirty();
}
}
}
diff --git a/llvm/test/CodeGen/AMDGPU/barrier-signal-wait-latency.ll b/llvm/test/CodeGen/AMDGPU/barrier-signal-wait-latency.ll
new file mode 100644
index 0000000000000..0b512b00c3fbc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/barrier-signal-wait-latency.ll
@@ -0,0 +1,197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck --check-prefix=OPT %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -amdgpu-barrier-signal-wait-latency=0 < %s | FileCheck --check-prefix=NOOPT %s
+
+; Tests for scheduling independent work between s_barrier_signal and s_barrier_wait
+; for latency hiding.
+
+; Independent work should be scheduled between signal/wait
+define amdgpu_kernel void @test_barrier_independent_valu(ptr addrspace(1) %out, i32 %size) #0 {
+; OPT-LABEL: test_barrier_independent_valu:
+; OPT: ; %bb.0: ; %entry
+; OPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; OPT-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT: v_lshlrev_b32_e32 v2, 2, v1
+; OPT-NEXT: s_wait_kmcnt 0x0
+; OPT-NEXT: v_xad_u32 v0, v1, -1, s2
+; OPT-NEXT: global_store_b32 v2, v1, s[0:1]
+; OPT-NEXT: s_barrier_signal -1
+; OPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; OPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; OPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
+; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; OPT-NEXT: s_barrier_wait -1
+; OPT-NEXT: global_load_b32 v0, v[0:1], off
+; OPT-NEXT: s_wait_loadcnt 0x0
+; OPT-NEXT: global_store_b32 v2, v0, s[0:1]
+; OPT-NEXT: s_endpgm
+;
+; NOOPT-LABEL: test_barrier_independent_valu:
+; NOOPT: ; %bb.0: ; %entry
+; NOOPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; NOOPT-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT: v_lshlrev_b32_e32 v3, 2, v2
+; NOOPT-NEXT: s_wait_kmcnt 0x0
+; NOOPT-NEXT: v_xad_u32 v0, v2, -1, s2
+; NOOPT-NEXT: global_store_b32 v3, v2, s[0:1]
+; NOOPT-NEXT: s_barrier_signal -1
+; NOOPT-NEXT: s_barrier_wait -1
+; NOOPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; NOOPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; NOOPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
+; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; NOOPT-NEXT: global_load_b32 v0, v[0:1], off
+; NOOPT-NEXT: s_wait_loadcnt 0x0
+; NOOPT-NEXT: global_store_b32 v3, v0, s[0:1]
+; NOOPT-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %addr = getelementptr i32, ptr addrspace(1) %out, i32 %tid
+ store i32 %tid, ptr addrspace(1) %addr
+ call void @llvm.amdgcn.s.barrier.signal(i32 -1)
+ call void @llvm.amdgcn.s.barrier.wait(i16 -1)
+ %idx_base = sub i32 %size, 1
+ %idx = sub i32 %idx_base, %tid
+ %read_addr = getelementptr i32, ptr addrspace(1) %out, i32 %idx
+ %val = load i32, ptr addrspace(1) %read_addr
+ store i32 %val, ptr addrspace(1) %addr
+ ret void
+}
+
+; No independent work - signal/wait should stay adjacent
+define amdgpu_kernel void @test_barrier_no_independent_work(ptr addrspace(3) %lds) #0 {
+; OPT-LABEL: test_barrier_no_independent_work:
+; OPT: ; %bb.0: ; %entry
+; OPT-NEXT: s_load_b32 s0, s[4:5], 0x24
+; OPT-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; OPT-NEXT: s_wait_kmcnt 0x0
+; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT: v_lshl_add_u32 v1, v0, 2, s0
+; OPT-NEXT: ds_store_b32 v1, v0
+; OPT-NEXT: s_barrier_signal -1
+; OPT-NEXT: s_barrier_wait -1
+; OPT-NEXT: ds_load_b32 v0, v1
+; OPT-NEXT: s_wait_dscnt 0x0
+; OPT-NEXT: ds_store_b32 v1, v0 offset:4
+; OPT-NEXT: s_endpgm
+;
+; NOOPT-LABEL: test_barrier_no_independent_work:
+; NOOPT: ; %bb.0: ; %entry
+; NOOPT-NEXT: s_load_b32 s0, s[4:5], 0x24
+; NOOPT-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; NOOPT-NEXT: s_wait_kmcnt 0x0
+; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT: v_lshl_add_u32 v1, v0, 2, s0
+; NOOPT-NEXT: ds_store_b32 v1, v0
+; NOOPT-NEXT: s_barrier_signal -1
+; NOOPT-NEXT: s_barrier_wait -1
+; NOOPT-NEXT: ds_load_b32 v0, v1
+; NOOPT-NEXT: s_wait_dscnt 0x0
+; NOOPT-NEXT: ds_store_b32 v1, v0 offset:4
+; NOOPT-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %addr = getelementptr i32, ptr addrspace(3) %lds, i32 %tid
+ store i32 %tid, ptr addrspace(3) %addr
+ call void @llvm.amdgcn.s.barrier.signal(i32 -1)
+ call void @llvm.amdgcn.s.barrier.wait(i16 -1)
+ %val = load i32, ptr addrspace(3) %addr
+ %next = add i32 %tid, 1
+ %next_addr = getelementptr i32, ptr addrspace(3) %lds, i32 %next
+ store i32 %val, ptr addrspace(3) %next_addr
+ ret void
+}
+
+; Multiple barriers
+define amdgpu_kernel void @test_barrier_multiple(ptr addrspace(1) %out, i32 %size) #0 {
+; OPT-LABEL: test_barrier_multiple:
+; OPT: ; %bb.0: ; %entry
+; OPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; OPT-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT: v_lshlrev_b32_e32 v2, 2, v1
+; OPT-NEXT: s_wait_kmcnt 0x0
+; OPT-NEXT: v_xad_u32 v0, v1, -1, s2
+; OPT-NEXT: global_store_b32 v2, v1, s[0:1]
+; OPT-NEXT: s_barrier_signal -1
+; OPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; OPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; OPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
+; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; OPT-NEXT: s_barrier_wait -1
+; OPT-NEXT: global_load_b32 v3, v[0:1], off
+; OPT-NEXT: s_wait_loadcnt 0x0
+; OPT-NEXT: global_store_b32 v2, v3, s[0:1]
+; OPT-NEXT: s_barrier_signal -1
+; OPT-NEXT: s_barrier_wait -1
+; OPT-NEXT: global_load_b32 v0, v[0:1], off offset:-4
+; OPT-NEXT: s_wait_loadcnt 0x0
+; OPT-NEXT: global_store_b32 v2, v0, s[0:1]
+; OPT-NEXT: s_endpgm
+;
+; NOOPT-LABEL: test_barrier_multiple:
+; NOOPT: ; %bb.0: ; %entry
+; NOOPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; NOOPT-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT: v_lshlrev_b32_e32 v3, 2, v2
+; NOOPT-NEXT: s_wait_kmcnt 0x0
+; NOOPT-NEXT: v_xad_u32 v0, v2, -1, s2
+; NOOPT-NEXT: global_store_b32 v3, v2, s[0:1]
+; NOOPT-NEXT: s_barrier_signal -1
+; NOOPT-NEXT: s_barrier_wait -1
+; NOOPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; NOOPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; NOOPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
+; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; NOOPT-NEXT: global_load_b32 v2, v[0:1], off
+; NOOPT-NEXT: s_wait_loadcnt 0x0
+; NOOPT-NEXT: global_store_b32 v3, v2, s[0:1]
+; NOOPT-NEXT: s_barrier_signal -1
+; NOOPT-NEXT: s_barrier_wait -1
+; NOOPT-NEXT: global_load_b32 v0, v[0:1], off offset:-4
+; NOOPT-NEXT: s_wait_loadcnt 0x0
+; NOOPT-NEXT: global_store_b32 v3, v0, s[0:1]
+; NOOPT-NEXT: s_endpgm
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %addr = getelementptr i32, ptr addrspace(1) %out, i32 %tid
+ store i32 %tid, ptr addrspace(1) %addr
+
+ call void @llvm.amdgcn.s.barrier.signal(i32 -1)
+ call void @llvm.amdgcn.s.barrier.wait(i16 -1)
+
+ %idx1_base = sub i32 %size, 1
+ %idx1 = sub i32 %idx1_base, %tid
+ %read_addr1 = getelementptr i32, ptr addrspace(1) %out, i32 %idx1
+ %val1 = load i32, ptr addrspace(1) %read_addr1
+ store i32 %val1, ptr addrspace(1) %addr
+
+ call void @llvm.amdgcn.s.barrier.signal(i32 -1)
+ call void @llvm.amdgcn.s.barrier.wait(i16 -1)
+
+ %idx2_base = sub i32 %size, 2
+ %idx2 = sub i32 %idx2_base, %tid
+ %read_addr2 = getelementptr i32, ptr addrspace(1) %out, i32 %idx2
+ %val2 = load i32, ptr addrspace(1) %read_addr2
+ store i32 %val2, ptr addrspace(1) %addr
+ ret void
+}
+
+declare void @llvm.amdgcn.s.barrier.signal(i32) #1
+declare void @llvm.amdgcn.s.barrier.wait(i16) #1
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+
+attributes #0 = { nounwind }
+attributes #1 = { convergent nounwind }
+attributes #2 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll
index 9003251253740..139c698873d67 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll
@@ -92,68 +92,68 @@ define amdgpu_kernel void @test_barrier(ptr addrspace(1) %out, i32 %size) #0 {
; VARIANT4-LABEL: test_barrier:
; VARIANT4: ; %bb.0: ; %entry
; VARIANT4-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
-; VARIANT4-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; VARIANT4-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; VARIANT4-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; VARIANT4-NEXT: v_lshlrev_b32_e32 v3, 2, v2
+; VARIANT4-NEXT: v_lshlrev_b32_e32 v2, 2, v1
; VARIANT4-NEXT: s_wait_kmcnt 0x0
-; VARIANT4-NEXT: v_xad_u32 v0, v2, -1, s2
-; VARIANT4-NEXT: global_store_b32 v3, v2, s[0:1]
+; VARIANT4-NEXT: v_xad_u32 v0, v1, -1, s2
+; VARIANT4-NEXT: global_store_b32 v2, v1, s[0:1]
; VARIANT4-NEXT: s_barrier_signal -1
-; VARIANT4-NEXT: s_barrier_wait -1
; VARIANT4-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VARIANT4-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; VARIANT4-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
; VARIANT4-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
; VARIANT4-NEXT: s_delay_alu instid0(VALU_DEP_1)
; VARIANT4-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; VARIANT4-NEXT: s_barrier_wait -1
; VARIANT4-NEXT: global_load_b32 v0, v[0:1], off
; VARIANT4-NEXT: s_wait_loadcnt 0x0
-; VARIANT4-NEXT: global_store_b32 v3, v0, s[0:1]
+; VARIANT4-NEXT: global_store_b32 v2, v0, s[0:1]
; VARIANT4-NEXT: s_endpgm
;
; VARIANT5-LABEL: test_barrier:
; VARIANT5: ; %bb.0: ; %entry
; VARIANT5-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
-; VARIANT5-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; VARIANT5-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; VARIANT5-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; VARIANT5-NEXT: v_lshlrev_b32_e32 v3, 2, v2
+; VARIANT5-NEXT: v_lshlrev_b32_e32 v2, 2, v1
; VARIANT5-NEXT: s_wait_kmcnt 0x0
-; VARIANT5-NEXT: v_xad_u32 v0, v2, -1, s2
-; VARIANT5-NEXT: global_store_b32 v3, v2, s[0:1]
+; VARIANT5-NEXT: v_xad_u32 v0, v1, -1, s2
+; VARIANT5-NEXT: global_store_b32 v2, v1, s[0:1]
; VARIANT5-NEXT: s_barrier_signal -1
-; VARIANT5-NEXT: s_barrier_wait -1
; VARIANT5-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VARIANT5-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; VARIANT5-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
; VARIANT5-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0
; VARIANT5-NEXT: s_delay_alu instid0(VALU_DEP_1)
; VARIANT5-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; VARIANT5-NEXT: s_barrier_wait -1
; VARIANT5-NEXT: global_load_b32 v0, v[0:1], off
; VARIANT5-NEXT: s_wait_loadcnt 0x0
-; VARIANT5-NEXT: global_store_b32 v3, v0, s[0:1]
+; VARIANT5-NEXT: global_store_b32 v2, v0, s[0:1]
; VARIANT5-NEXT: s_endpgm
;
; VARIANT6-LABEL: test_barrier:
; VARIANT6: ; %bb.0: ; %entry
; VARIANT6-NEXT: s_load_b96 s[0:2], s[4:5], 0x24
+; VARIANT6-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; VARIANT6-NEXT: s_wait_kmcnt 0x0
-; VARIANT6-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_and_b32 v4, 0x3ff, v0
; VARIANT6-NEXT: s_add_co_i32 s2, s2, -1
-; VARIANT6-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; VARIANT6-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_lshlrev_b32 v5, 2, v4
-; VARIANT6-NEXT: v_sub_nc_u32_e32 v0, s2, v4
-; VARIANT6-NEXT: global_store_b32 v5, v4, s[0:1]
+; VARIANT6-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; VARIANT6-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; VARIANT6-NEXT: v_lshlrev_b32_e32 v4, 2, v1
+; VARIANT6-NEXT: v_sub_nc_u32_e32 v0, s2, v1
+; VARIANT6-NEXT: global_store_b32 v4, v1, s[0:1]
; VARIANT6-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; VARIANT6-NEXT: s_barrier_signal -1
-; VARIANT6-NEXT: s_barrier_wait -1
-; VARIANT6-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; VARIANT6-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; VARIANT6-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; VARIANT6-NEXT: v_add_co_u32 v0, vcc_lo, v2, v0
-; VARIANT6-NEXT: s_delay_alu instid0(VALU_DEP_1)
; VARIANT6-NEXT: v_add_co_ci_u32_e64 v1, null, v3, v1, vcc_lo
+; VARIANT6-NEXT: s_barrier_wait -1
; VARIANT6-NEXT: global_load_b32 v0, v[0:1], off
; VARIANT6-NEXT: s_wait_loadcnt 0x0
-; VARIANT6-NEXT: global_store_b32 v5, v0, s[0:1]
+; VARIANT6-NEXT: global_store_b32 v4, v0, s[0:1]
; VARIANT6-NEXT: s_endpgm
entry:
%tmp = call i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/s-barrier.ll b/llvm/test/CodeGen/AMDGPU/s-barrier.ll
index 6e51c870cd487..35b86998c9cac 100644
--- a/llvm/test/CodeGen/AMDGPU/s-barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/s-barrier.ll
@@ -99,8 +99,8 @@ define amdgpu_kernel void @kernel1(ptr addrspace(1) %out, ptr addrspace(3) %in)
; GFX12-SDAG-NEXT: s_mov_b32 m0, s2
; GFX12-SDAG-NEXT: s_barrier_signal -1
; GFX12-SDAG-NEXT: s_barrier_join m0
-; GFX12-SDAG-NEXT: s_mov_b32 m0, 2
; GFX12-SDAG-NEXT: s_barrier_signal_isfirst -1
+; GFX12-SDAG-NEXT: s_mov_b32 m0, 2
; GFX12-SDAG-NEXT: s_barrier_wait 1
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-NEXT: s_barrier_leave
>From 54f9e18513ce02e7899e416e3f2aae27dd33f125 Mon Sep 17 00:00:00 2001
From: Prasoon Mishra <Prasoon.Mishra at amd.com>
Date: Fri, 12 Dec 2025 18:07:21 +0000
Subject: [PATCH 2/2] Apply clang-format.
---
llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp b/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
index 39f6f409c8ebe..7e033914aeeaa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
@@ -67,7 +67,7 @@ void addLatencyToEdge(SDep &PredDep, SUnit &SU, unsigned Latency) {
}
void BarrierLatency::apply(ScheduleDAGInstrs *DAG) {
- const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(DAG->TII);
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(DAG->TII);
constexpr unsigned FenceLatency = 2000;
const unsigned BarrierSignalWaitLatency = BarrierSignalWaitLatencyOpt;