[llvm] AMDGPU: Treat SWMMAC the same as MFMA and other WMMA for sched_barrier (PR #85721)

Changpeng Fang via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 18 17:37:09 PDT 2024


https://github.com/changpeng created https://github.com/llvm/llvm-project/pull/85721

None
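
For context (editorial note, not part of the patch): the sched_group_barrier scheduling groups are built by the IGroupLP scheduling mutation (AMDGPUIGroupLP.cpp), which, as far as I can tell, uses SIInstrInfo::isMFMAorWMMA to decide membership in the 0x8 ("MFMA/WMMA") group. Before this change, gfx12 sparse WMMA (SWMMAC) instructions failed that predicate and could not be placed into such a group. A minimal standalone sketch of the effect of the one-line change, using a hypothetical InstKind enum rather than the real MachineInstr flags:

    // Standalone illustration only; the real predicate is SIInstrInfo::isMFMAorWMMA
    // and the real consumer is the IGroupLP DAG mutation.
    #include <cstdio>

    enum class InstKind { MFMA, WMMA, SWMMAC, DSRead, DSWrite, Other };

    // Before this patch the SWMMAC case was missing, so gfx12 sparse WMMA
    // instructions never matched the 0x8 scheduling group.
    static bool isMFMAorWMMA(InstKind K) {
      return K == InstKind::MFMA || K == InstKind::WMMA || K == InstKind::SWMMAC;
    }

    int main() {
      std::printf("SWMMAC classified with MFMA/WMMA: %s\n",
                  isMFMAorWMMA(InstKind::SWMMAC) ? "yes" : "no");
      return 0;
    }

With SWMMAC included, the mask-0x8 pipelines requested by the new test below can actually be formed around v_swmmac_* instructions.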

From 1d9c313fc8d9375bd0391db75f2dd3df8592ea13 Mon Sep 17 00:00:00 2001
From: Changpeng Fang <changpeng.fang at amd.com>
Date: Mon, 18 Mar 2024 17:33:39 -0700
Subject: [PATCH] AMDGPU: Treat SWMMAC the same as MFMA and other WMMA for
 sched_barrier

---
 llvm/lib/Target/AMDGPU/SIInstrInfo.h          |   2 +-
 .../llvm.amdgcn.sched.group.barrier.gfx12.ll  | 333 ++++++++++++++++++
 2 files changed, 334 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll

diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index a62bf779fe2e2d..4c5978cdc6665c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -815,7 +815,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   }
 
   static bool isMFMAorWMMA(const MachineInstr &MI) {
-    return isMFMA(MI) || isWMMA(MI);
+    return isMFMA(MI) || isWMMA(MI) || isSWMMAC(MI);
   }
 
   static bool isSWMMAC(const MachineInstr &MI) {
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll
new file mode 100644
index 00000000000000..e33911d6fd28ab
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx12.ll
@@ -0,0 +1,333 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -misched-cluster=0 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -misched-cluster=0 -amdgpu-igrouplp-exact-solver-max-branches=250000 < %s | FileCheck -check-prefix=EXACTCUTOFF %s
+
+declare <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half>, <16 x half>, <8 x half>, i16)
+
+define amdgpu_kernel void @test_sched_group_barrier_pipeline_SWMMAC_cluster(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
+; GCN-LABEL: test_sched_group_barrier_pipeline_SWMMAC_cluster:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v28, 4, v0
+; GCN-NEXT:    v_mov_b32_e32 v48, 0
+; GCN-NEXT:    s_wait_kmcnt 0x0
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GCN-NEXT:    v_add_nc_u32_e32 v0, s0, v28
+; GCN-NEXT:    v_dual_mov_b32 v50, s1 :: v_dual_add_nc_u32 v49, s1, v28
+; GCN-NEXT:    ds_load_b128 v[8:11], v0
+; GCN-NEXT:    ds_load_b128 v[12:15], v0 offset:512
+; GCN-NEXT:    ds_load_b128 v[16:19], v0 offset:1536
+; GCN-NEXT:    ds_load_b128 v[20:23], v0 offset:3072
+; GCN-NEXT:    ds_load_b128 v[24:27], v0 offset:5120
+; GCN-NEXT:    ds_load_b128 v[4:7], v0 offset:11280
+; GCN-NEXT:    ds_load_b128 v[0:3], v0 offset:11264
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(7) SyncID(0)
+; GCN-NEXT:    s_wait_dscnt 0x6
+; GCN-NEXT:    v_mov_b32_e32 v31, v11
+; GCN-NEXT:    s_wait_dscnt 0x5
+; GCN-NEXT:    v_mov_b32_e32 v35, v15
+; GCN-NEXT:    s_wait_dscnt 0x4
+; GCN-NEXT:    v_mov_b32_e32 v39, v19
+; GCN-NEXT:    s_wait_dscnt 0x3
+; GCN-NEXT:    v_mov_b32_e32 v43, v23
+; GCN-NEXT:    s_wait_dscnt 0x2
+; GCN-NEXT:    v_dual_mov_b32 v47, v27 :: v_dual_mov_b32 v30, v10
+; GCN-NEXT:    v_dual_mov_b32 v29, v9 :: v_dual_mov_b32 v28, v8
+; GCN-NEXT:    v_dual_mov_b32 v34, v14 :: v_dual_mov_b32 v33, v13
+; GCN-NEXT:    v_mov_b32_e32 v32, v12
+; GCN-NEXT:    v_dual_mov_b32 v38, v18 :: v_dual_mov_b32 v37, v17
+; GCN-NEXT:    v_mov_b32_e32 v36, v16
+; GCN-NEXT:    v_dual_mov_b32 v42, v22 :: v_dual_mov_b32 v41, v21
+; GCN-NEXT:    v_mov_b32_e32 v40, v20
+; GCN-NEXT:    v_dual_mov_b32 v46, v26 :: v_dual_mov_b32 v45, v25
+; GCN-NEXT:    v_mov_b32_e32 v44, v24
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[28:31], v[8:11], v[0:7], v48
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[32:35], v[12:15], v[0:7], v48
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[36:39], v[16:19], v[0:7], v48
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[40:43], v[20:23], v[0:7], v48
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[44:47], v[24:27], v[0:7], v48
+; GCN-NEXT:    ds_store_b128 v49, v[28:31]
+; GCN-NEXT:    ds_store_b128 v50, v[32:35] offset:512
+; GCN-NEXT:    ds_store_b128 v50, v[36:39] offset:1024
+; GCN-NEXT:    ds_store_b128 v50, v[40:43] offset:1536
+; GCN-NEXT:    ds_store_b128 v50, v[44:47] offset:2048
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(5) SyncID(0)
+; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_SWMMAC_cluster:
+; EXACTCUTOFF:       ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT:    v_lshlrev_b32_e32 v28, 4, v0
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v48, 0
+; EXACTCUTOFF-NEXT:    s_wait_kmcnt 0x0
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; EXACTCUTOFF-NEXT:    v_add_nc_u32_e32 v0, s0, v28
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v50, s1 :: v_dual_add_nc_u32 v49, s1, v28
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[8:11], v0
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[12:15], v0 offset:512
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[16:19], v0 offset:1536
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[20:23], v0 offset:3072
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[24:27], v0 offset:5120
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[4:7], v0 offset:11280
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[0:3], v0 offset:11264
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(7) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x6
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v31, v11
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x5
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v35, v15
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x4
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v39, v19
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x3
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v43, v23
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x2
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v47, v27 :: v_dual_mov_b32 v30, v10
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v29, v9 :: v_dual_mov_b32 v28, v8
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v34, v14 :: v_dual_mov_b32 v33, v13
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v32, v12
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v38, v18 :: v_dual_mov_b32 v37, v17
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v36, v16
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v42, v22 :: v_dual_mov_b32 v41, v21
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v40, v20
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v46, v26 :: v_dual_mov_b32 v45, v25
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v44, v24
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[28:31], v[8:11], v[0:7], v48
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[32:35], v[12:15], v[0:7], v48
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[36:39], v[16:19], v[0:7], v48
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[40:43], v[20:23], v[0:7], v48
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[44:47], v[24:27], v[0:7], v48
+; EXACTCUTOFF-NEXT:    ds_store_b128 v49, v[28:31]
+; EXACTCUTOFF-NEXT:    ds_store_b128 v50, v[32:35] offset:512
+; EXACTCUTOFF-NEXT:    ds_store_b128 v50, v[36:39] offset:1024
+; EXACTCUTOFF-NEXT:    ds_store_b128 v50, v[40:43] offset:1536
+; EXACTCUTOFF-NEXT:    ds_store_b128 v50, v[44:47] offset:2048
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(5) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_endpgm
+entry:
+  %idx = call i32 @llvm.amdgcn.workitem.id.x()
+  %load.0.addr = getelementptr <8 x half>, ptr addrspace(3) %in, i32 %idx
+  %load.0 = load <8 x half>, ptr addrspace(3) %load.0.addr
+  %load.1.addr = getelementptr <8 x half>, ptr addrspace(3) %load.0.addr, i32 32
+  %load.1 = load <8 x half>, ptr addrspace(3) %load.1.addr
+  %load.2.addr = getelementptr <8 x half>, ptr addrspace(3) %load.1.addr, i32 64
+  %load.2 = load <8 x half>, ptr addrspace(3) %load.2.addr
+  %load.3.addr = getelementptr <8 x half>, ptr addrspace(3) %load.2.addr, i32 96
+  %load.3 = load <8 x half>, ptr addrspace(3) %load.3.addr
+  %load.4.addr = getelementptr <8 x half>, ptr addrspace(3) %load.3.addr, i32 128
+  %load.4 = load <8 x half>, ptr addrspace(3) %load.4.addr
+  %load.b.addr = getelementptr <16 x half>, ptr addrspace(3) %load.4.addr, i32 192
+  %load.b = load <16 x half>, ptr addrspace(3) %load.b.addr
+  %mai.0 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.0, <16 x half> %load.b, <8 x half> %load.0, i16 0)
+  %mai.1 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.1, <16 x half> %load.b, <8 x half> %load.1, i16 0)
+  %mai.2 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.2, <16 x half> %load.b, <8 x half> %load.2, i16 0)
+  %mai.3 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.3, <16 x half> %load.b, <8 x half> %load.3, i16 0)
+  %mai.4 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.4, <16 x half> %load.b, <8 x half> %load.4, i16 0)
+  %store.0.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 %idx
+  store <8 x half> %mai.0, ptr addrspace(3) %store.0.addr
+  %store.1.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 32
+  store <8 x half> %mai.1, ptr addrspace(3) %store.1.addr
+  %store.2.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 64
+  store <8 x half> %mai.2, ptr addrspace(3) %store.2.addr
+  %store.3.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 96
+  store <8 x half> %mai.3, ptr addrspace(3) %store.3.addr
+  %store.4.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 128
+  store <8 x half> %mai.4, ptr addrspace(3) %store.4.addr
+  ; 7 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 7, i32 0)
+  ; 5 SWMMAC
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 5, i32 0)
+  ; 5 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 5, i32 0)
+  ret void
+}
+
+define amdgpu_kernel void @test_sched_group_barrier_pipeline_SWMMAC_interleaved(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
+; GCN-LABEL: test_sched_group_barrier_pipeline_SWMMAC_interleaved:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v18, 0
+; GCN-NEXT:    s_wait_kmcnt 0x0
+; GCN-NEXT:    v_lshl_add_u32 v17, v0, 5, s0
+; GCN-NEXT:    v_lshl_add_u32 v0, v0, 4, s1
+; GCN-NEXT:    ds_load_b128 v[9:12], v17 offset:1024
+; GCN-NEXT:    ds_load_b128 v[1:4], v17
+; GCN-NEXT:    ds_load_b128 v[5:8], v17 offset:16
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(3) SyncID(0)
+; GCN-NEXT:    s_wait_dscnt 0x2
+; GCN-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v0, v[13:16]
+; GCN-NEXT:    ds_load_b128 v[9:12], v17 offset:2560
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v0, v[13:16] offset:512
+; GCN-NEXT:    ds_load_b128 v[9:12], v17 offset:4608
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v0, v[13:16] offset:1024
+; GCN-NEXT:    ds_load_b128 v[9:12], v17 offset:7168
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v0, v[13:16] offset:1536
+; GCN-NEXT:    ds_load_b128 v[9:12], v17 offset:10240
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; GCN-NEXT:    s_wait_dscnt 0x0
+; GCN-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; GCN-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v0, v[13:16] offset:2048
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_SWMMAC_interleaved:
+; EXACTCUTOFF:       ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v18, 0
+; EXACTCUTOFF-NEXT:    s_wait_kmcnt 0x0
+; EXACTCUTOFF-NEXT:    v_lshl_add_u32 v17, v0, 5, s0
+; EXACTCUTOFF-NEXT:    v_lshl_add_u32 v0, v0, 4, s1
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[9:12], v17 offset:1024
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[1:4], v17
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[5:8], v17 offset:16
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(3) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x2
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v0, v[13:16]
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[9:12], v17 offset:2560
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v0, s1
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v0, v[13:16] offset:512
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[9:12], v17 offset:4608
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v0, v[13:16] offset:1024
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[9:12], v17 offset:7168
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v0, v[13:16] offset:1536
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[9:12], v17 offset:10240
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_wait_dscnt 0x0
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v15, v11
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v14, v10 :: v_dual_mov_b32 v13, v9
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_swmmac_f16_16x16x32_f16 v[13:16], v[9:12], v[1:8], v18
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v0, v[13:16] offset:2048
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_endpgm
+entry:
+  %idx = call i32 @llvm.amdgcn.workitem.id.x()
+  %load.b.addr = getelementptr <16 x half>, ptr addrspace(3) %in, i32 %idx
+  %load.b = load <16 x half>, ptr addrspace(3) %load.b.addr
+  %load.0.addr = getelementptr <8 x half>, ptr addrspace(3) %load.b.addr, i32 64
+  %load.0 = load <8 x half>, ptr addrspace(3) %load.0.addr
+  %load.1.addr = getelementptr <8 x half>, ptr addrspace(3) %load.0.addr, i32 96
+  %load.1 = load <8 x half>, ptr addrspace(3) %load.1.addr
+  %load.2.addr = getelementptr <8 x half>, ptr addrspace(3) %load.1.addr, i32 128
+  %load.2 = load <8 x half>, ptr addrspace(3) %load.2.addr
+  %load.3.addr = getelementptr <8 x half>, ptr addrspace(3) %load.2.addr, i32 160
+  %load.3 = load <8 x half>, ptr addrspace(3) %load.3.addr
+  %load.4.addr = getelementptr <8 x half>, ptr addrspace(3) %load.3.addr, i32 192
+  %load.4 = load <8 x half>, ptr addrspace(3) %load.4.addr
+  %mai.0 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.0, <16 x half> %load.b, <8 x half> %load.0, i16 0)
+  %mai.1 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.1, <16 x half> %load.b, <8 x half> %load.1, i16 0)
+  %mai.2 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.2, <16 x half> %load.b, <8 x half> %load.2, i16 0)
+  %mai.3 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.3, <16 x half> %load.b, <8 x half> %load.3, i16 0)
+  %mai.4 = call <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.i16(<8 x half> %load.4, <16 x half> %load.b, <8 x half> %load.4, i16 0)
+  %store.0.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 %idx
+  store <8 x half> %mai.0, ptr addrspace(3) %store.0.addr
+  %store.1.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 32
+  store <8 x half> %mai.1, ptr addrspace(3) %store.1.addr
+  %store.2.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 64
+  store <8 x half> %mai.2, ptr addrspace(3) %store.2.addr
+  %store.3.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 96
+  store <8 x half> %mai.3, ptr addrspace(3) %store.3.addr
+  %store.4.addr = getelementptr <8 x half>, ptr addrspace(3) %out, i32 128
+  store <8 x half> %mai.4, ptr addrspace(3) %store.4.addr
+  ; 3 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 3, i32 0)
+  ; 1 SWMMAC
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 1 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+  ; 1 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+  ; 1 SWMMAC
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 1 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+  ; 1 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+  ; 1 SWMMAC
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 1 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+  ; 1 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+  ; 1 SWMMAC
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 1 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+  ; 1 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
+  ; 1 SWMMAC
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 1 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
+  ret void
+}
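
A note on reading the tests (my understanding of the intrinsic, not text from the patch): each llvm.amdgcn.sched.group.barrier(mask, size, syncid) call requests one scheduling group of up to `size` instructions matching `mask`, and groups sharing a SyncID are ordered in the sequence the calls appear. The first test therefore asks for 7 DS reads, then 5 SWMMAC, then 5 DS writes as one cluster, while the second repeats size-1 read/SWMMAC/write groups to interleave the pipeline. A compile-time sketch of the mask bits involved, assuming the bit layout documented for the intrinsic (the values 0x100, 0x8 and 0x200 are confirmed by the masks used in the tests; the others are listed for orientation and may need checking against AMDGPUIGroupLP.cpp):

    // Sketch of the sched_group_barrier mask bits exercised by these tests.
    #include <cstdint>

    namespace sched_mask {
    constexpr uint32_t ALU        = 1u << 0; // 0x001  all ALU
    constexpr uint32_t VALU       = 1u << 1; // 0x002
    constexpr uint32_t SALU       = 1u << 2; // 0x004
    constexpr uint32_t MFMA_WMMA  = 1u << 3; // 0x008, after this patch also SWMMAC
    constexpr uint32_t VMEM       = 1u << 4; // 0x010  all VMEM
    constexpr uint32_t VMEM_READ  = 1u << 5; // 0x020
    constexpr uint32_t VMEM_WRITE = 1u << 6; // 0x040
    constexpr uint32_t DS         = 1u << 7; // 0x080  all DS
    constexpr uint32_t DS_READ    = 1u << 8; // 0x100
    constexpr uint32_t DS_WRITE   = 1u << 9; // 0x200
    } // namespace sched_mask

    // The cluster test builds the pipeline 7x DS_READ, 5x MFMA_WMMA (SWMMAC),
    // 5x DS_WRITE, all with SyncID 0.
    static_assert(sched_mask::DS_READ == 256, "mask of the first call");
    static_assert(sched_mask::MFMA_WMMA == 8, "mask of the second call");
    static_assert(sched_mask::DS_WRITE == 512, "mask of the third call");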


