[llvm] [AMDGPU] Make S_WAIT_EVENT a scheduling boundary (PR #129032)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 12 07:30:04 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Jay Foad (jayfoad)
Changes
---
Full diff: https://github.com/llvm/llvm-project/pull/129032.diff
2 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (+7)
- (added) llvm/test/CodeGen/AMDGPU/sched-wait-event.ll (+59)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d5d54337306c0..b980986b9da6e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4214,6 +4214,13 @@ bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
if (MI.getOpcode() == AMDGPU::SCHED_BARRIER && MI.getOperand(0).getImm() == 0)
return true;
+  // The scheduler does not understand what kind of external events this
+  // instruction waits for, so it cannot do a good job of scheduling it. Making
+  // it a boundary allows front ends to insert it at an appropriate place
+  // without the scheduler arbitrarily moving it.
+  if (MI.getOpcode() == AMDGPU::S_WAIT_EVENT)
+    return true;
+
// Target-independent instructions do not have an implicit-use of EXEC, even
// when they operate on VGPRs. Treating EXEC modifications as scheduling
// boundaries prevents incorrect movements of such instructions.
diff --git a/llvm/test/CodeGen/AMDGPU/sched-wait-event.ll b/llvm/test/CodeGen/AMDGPU/sched-wait-event.ll
new file mode 100644
index 0000000000000..c9c039c414655
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sched-wait-event.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX12 %s
+
+; Check that the scheduler does not hoist the s_wait_event above the
+; interpolation calculations.
+define amdgpu_ps void @test_wait_event(i32 inreg %arg, float %arg1, float %arg2, <8 x i32> inreg %arg3) {
+; GFX12-LABEL: test_wait_event:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_mov_b32 s11, s8
+; GFX12-NEXT: s_mov_b32 m0, s0
+; GFX12-NEXT: s_mov_b32 s0, exec_lo
+; GFX12-NEXT: s_wqm_b32 exec_lo, exec_lo
+; GFX12-NEXT: ds_param_load v2, attr1.x wait_va_vdst:15 wait_vm_vsrc:1
+; GFX12-NEXT: ds_param_load v3, attr1.y wait_va_vdst:15 wait_vm_vsrc:1
+; GFX12-NEXT: s_mov_b32 s10, s7
+; GFX12-NEXT: s_mov_b32 s9, s6
+; GFX12-NEXT: s_mov_b32 s8, s5
+; GFX12-NEXT: s_mov_b32 s7, s4
+; GFX12-NEXT: s_mov_b32 s6, s3
+; GFX12-NEXT: s_mov_b32 s5, s2
+; GFX12-NEXT: s_mov_b32 s4, s1
+; GFX12-NEXT: s_mov_b32 exec_lo, s0
+; GFX12-NEXT: v_interp_p10_f32 v4, v2, v1, v2 wait_exp:1
+; GFX12-NEXT: v_interp_p10_f32 v1, v3, v1, v3 wait_exp:0
+; GFX12-NEXT: v_interp_p2_f32 v4, v2, v0, v4 wait_exp:7
+; GFX12-NEXT: v_interp_p2_f32 v0, v3, v0, v1 wait_exp:7
+; GFX12-NEXT: v_mul_f32_e32 v1, 0x44800000, v4
+; GFX12-NEXT: v_mul_f32_e32 v0, 0x44800000, v0
+; GFX12-NEXT: v_cvt_i32_f32_e32 v1, v1
+; GFX12-NEXT: v_cvt_i32_f32_e32 v0, v0
+; GFX12-NEXT: s_wait_event 0x2
+; GFX12-NEXT: image_load v[4:7], [v1, v0], s[4:11] dmask:0xf dim:SQ_RSRC_IMG_2D
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_dual_mov_b32 v8, 0 :: v_dual_mul_f32 v7, 0.5, v7
+; GFX12-NEXT: v_dual_mul_f32 v6, 0.5, v6 :: v_dual_mul_f32 v5, 0.5, v5
+; GFX12-NEXT: v_mul_f32_e32 v4, 0.5, v4
+; GFX12-NEXT: image_store v[4:7], [v1, v0], s[4:11] dmask:0xf dim:SQ_RSRC_IMG_2D
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: export mrt0 v8, v8, v8, v8 done
+; GFX12-NEXT: s_endpgm
+bb:
+ %i = call float @llvm.amdgcn.lds.param.load(i32 0, i32 1, i32 %arg)
+ %i4 = call float @llvm.amdgcn.interp.inreg.p10(float %i, float %arg2, float %i)
+ %i5 = call float @llvm.amdgcn.interp.inreg.p2(float %i, float %arg1, float %i4)
+ %i6 = call float @llvm.amdgcn.lds.param.load(i32 1, i32 1, i32 %arg)
+ %i7 = call float @llvm.amdgcn.interp.inreg.p10(float %i6, float %arg2, float %i6)
+ %i8 = call float @llvm.amdgcn.interp.inreg.p2(float %i6, float %arg1, float %i7)
+ %i9 = fmul float %i5, 1024.0
+ %i10 = fmul float %i8, 1024.0
+ %i11 = fptosi float %i9 to i32
+ %i12 = fptosi float %i10 to i32
+ call void @llvm.amdgcn.s.wait.event.export.ready()
+ %i13 = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32.v8i32(i32 15, i32 %i11, i32 %i12, <8 x i32> %arg3, i32 0, i32 0)
+ %i14 = fmul <4 x float> %i13, splat (float 0.5)
+ call void @llvm.amdgcn.image.store.2d.v4f32.i32.v8i32(<4 x float> %i14, i32 15, i32 %i11, i32 %i12, <8 x i32> %arg3, i32 0, i32 0)
+ fence syncscope("agent") release
+ call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float 0.0, float 0.0, float 0.0, float 0.0, i1 true, i1 true)
+ ret void
+}
``````````
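For readers unfamiliar with the hook: the generic MachineScheduler only reorders instructions within a scheduling region, and it ends the current region whenever the target's isSchedulingBoundary hook returns true, so the boundary instruction itself is never moved and nothing is reordered across it. The standalone C++ sketch below is a simplified model of that region-splitting behaviour, not the actual LLVM code; the opcode strings and the driver loop are illustrative only. It shows why marking S_WAIT_EVENT as a boundary keeps it from being hoisted above the interpolation math in the test.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Stand-in for SIInstrInfo::isSchedulingBoundary: with this patch,
// S_WAIT_EVENT (like a SCHED_BARRIER with a zero mask) ends a region.
static bool isSchedulingBoundary(const std::string &Opcode) {
  return Opcode == "S_WAIT_EVENT";
}

int main() {
  // Opcodes roughly matching the test: interpolation math, the wait,
  // then the image load that depends on the export-ready event.
  const std::vector<std::string> Block = {
      "V_INTERP_P10_F32", "V_INTERP_P2_F32", "V_CVT_I32_F32",
      "S_WAIT_EVENT",     "IMAGE_LOAD",      "IMAGE_STORE"};

  // The scheduler reorders instructions only within a region; a boundary
  // closes the current region and is itself left untouched, so nothing
  // before the wait can sink below it and nothing after can hoist above it.
  std::vector<std::vector<std::string>> Regions;
  std::vector<std::string> Current;
  for (const std::string &Opcode : Block) {
    if (isSchedulingBoundary(Opcode)) {
      Regions.push_back(Current);
      Current.clear();
      std::cout << "boundary (not scheduled): " << Opcode << '\n';
      continue;
    }
    Current.push_back(Opcode);
  }
  Regions.push_back(Current);

  for (size_t R = 0; R < Regions.size(); ++R) {
    std::cout << "region " << R << ":";
    for (const std::string &Opcode : Regions[R])
      std::cout << ' ' << Opcode;
    std::cout << '\n';
  }
  return 0;
}
```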
https://github.com/llvm/llvm-project/pull/129032
More information about the llvm-commits mailing list