[llvm] 63e7e9c - [AMDGPU] Treat WMMA the same as MFMA for sched_barrier

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 19 11:05:50 PST 2023


Author: Stanislav Mekhanoshin
Date: 2023-01-19T10:52:31-08:00
New Revision: 63e7e9c8756aeaa6dccd4620cba710c04e215934

URL: https://github.com/llvm/llvm-project/commit/63e7e9c8756aeaa6dccd4620cba710c04e215934
DIFF: https://github.com/llvm/llvm-project/commit/63e7e9c8756aeaa6dccd4620cba710c04e215934.diff

LOG: [AMDGPU] Treat WMMA the same as MFMA for sched_barrier

MFMA and WMMA are essentially the same thing, but appear on different ASICs.

Differential Revision: https://reviews.llvm.org/D142062
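
With this change, the MFMA mask bit (0x8) of llvm.amdgcn.sched.group.barrier also
matches WMMA instructions on gfx11. For illustration, a minimal IR sketch condensed
from the new gfx11 test (names and sizes here are illustrative, not copied verbatim
from the test):

    %a = load <16 x half>, ptr addrspace(3) %in
    %r = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %a, <16 x half> %a, <16 x half> %a, i1 0)
    store <16 x half> %r, ptr addrspace(3) %out
    ; group 1 DS read (0x100), then 1 MFMA/WMMA (0x8), then 1 DS write (0x200)
    call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
    call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
    call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)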

Added: 
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx11.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsAMDGPU.td
    llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
    llvm/lib/Target/AMDGPU/SIInstrInfo.h

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index e621bcd4c8419..365e51c1bd224 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -232,7 +232,7 @@ def int_amdgcn_wave_barrier : ClangBuiltin<"__builtin_amdgcn_wave_barrier">,
 //                         scheduled across SCHED_BARRIER, i.e. allow ALU instructions to pass.
 //     MASK = 0x0000 0002: VALU instructions may be scheduled across SCHED_BARRIER.
 //     MASK = 0x0000 0004: SALU instructions may be scheduled across SCHED_BARRIER.
-//     MASK = 0x0000 0008: MFMA instructions may be scheduled across SCHED_BARRIER.
+//     MASK = 0x0000 0008: MFMA/WMMA instructions may be scheduled across SCHED_BARRIER.
 //     MASK = 0x0000 0010: ALL VMEM instructions may be scheduled across SCHED_BARRIER.
 //     MASK = 0x0000 0020: VMEM read instructions may be scheduled across SCHED_BARRIER.
 //     MASK = 0x0000 0040: VMEM write instructions may be scheduled across SCHED_BARRIER.
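
The same 0x8 bit applies to the plain sched_barrier intrinsic documented in this
hunk; a minimal usage sketch, assuming the mask value from the comment above:

    ; allow only MFMA/WMMA instructions to be scheduled across this barrier
    call void @llvm.amdgcn.sched.barrier(i32 8)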

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
index 806e96441b055..fc0df61952e48 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -765,7 +765,7 @@ void MFMASmallGemmOpt::applyIGLPStrategy(
   // Count the number of MFMA instructions.
   unsigned MFMACount = 0;
   for (const MachineInstr &I : *DAG)
-    if (TII->isMFMA(I))
+    if (TII->isMFMAorWMMA(I))
       ++MFMACount;
 
   const unsigned PipelineSyncID = 0;
@@ -848,11 +848,11 @@ bool SchedGroup::canAddMI(const MachineInstr &MI) const {
     Result = false;
 
   else if (((SGMask & SchedGroupMask::ALU) != SchedGroupMask::NONE) &&
-           (TII->isVALU(MI) || TII->isMFMA(MI) || TII->isSALU(MI)))
+           (TII->isVALU(MI) || TII->isMFMAorWMMA(MI) || TII->isSALU(MI)))
     Result = true;
 
   else if (((SGMask & SchedGroupMask::VALU) != SchedGroupMask::NONE) &&
-           TII->isVALU(MI) && !TII->isMFMA(MI))
+           TII->isVALU(MI) && !TII->isMFMAorWMMA(MI))
     Result = true;
 
   else if (((SGMask & SchedGroupMask::SALU) != SchedGroupMask::NONE) &&
@@ -860,7 +860,7 @@ bool SchedGroup::canAddMI(const MachineInstr &MI) const {
     Result = true;
 
   else if (((SGMask & SchedGroupMask::MFMA) != SchedGroupMask::NONE) &&
-           TII->isMFMA(MI))
+           TII->isMFMAorWMMA(MI))
     Result = true;
 
   else if (((SGMask & SchedGroupMask::VMEM) != SchedGroupMask::NONE) &&

diff  --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 48d21d719747a..6cbc02ab1dbc5 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -683,6 +683,10 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
     return get(Opcode).TSFlags & SIInstrFlags::IsWMMA;
   }
 
+  static bool isMFMAorWMMA(const MachineInstr &MI) {
+    return isMFMA(MI) || isWMMA(MI);
+  }
+
   bool isDOT(uint16_t Opcode) const {
     return get(Opcode).TSFlags & SIInstrFlags::IsDOT;
   }

diff  --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx11.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx11.ll
new file mode 100644
index 0000000000000..990a4e92170f0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sched.group.barrier.gfx11.ll
@@ -0,0 +1,403 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -misched-cluster=0 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -misched-cluster=0 -amdgpu-igrouplp-exact-solver-max-branches=250000 < %s | FileCheck -check-prefix=EXACTCUTOFF %s
+
+define amdgpu_kernel void @test_sched_group_barrier_pipeline_WMMA_cluster(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
+; GCN-LABEL: test_sched_group_barrier_pipeline_WMMA_cluster:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v40, 5, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_add_nc_u32_e32 v32, s0, v40
+; GCN-NEXT:    v_dual_mov_b32 v81, s1 :: v_dual_add_nc_u32 v80, s1, v40
+; GCN-NEXT:    ds_load_b128 v[4:7], v32 offset:16
+; GCN-NEXT:    ds_load_b128 v[12:15], v32 offset:2064
+; GCN-NEXT:    ds_load_b128 v[20:23], v32 offset:6160
+; GCN-NEXT:    ds_load_b128 v[28:31], v32 offset:12304
+; GCN-NEXT:    ds_load_b128 v[36:39], v32 offset:20496
+; GCN-NEXT:    ds_load_b128 v[0:3], v32
+; GCN-NEXT:    ds_load_b128 v[8:11], v32 offset:2048
+; GCN-NEXT:    ds_load_b128 v[16:19], v32 offset:6144
+; GCN-NEXT:    ds_load_b128 v[24:27], v32 offset:12288
+; GCN-NEXT:    ds_load_b128 v[32:35], v32 offset:20480
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(10) SyncID(0)
+; GCN-NEXT:    s_waitcnt lgkmcnt(4)
+; GCN-NEXT:    v_mov_b32_e32 v47, v7
+; GCN-NEXT:    s_waitcnt lgkmcnt(3)
+; GCN-NEXT:    v_mov_b32_e32 v55, v15
+; GCN-NEXT:    s_waitcnt lgkmcnt(2)
+; GCN-NEXT:    v_mov_b32_e32 v63, v23
+; GCN-NEXT:    s_waitcnt lgkmcnt(1)
+; GCN-NEXT:    v_mov_b32_e32 v71, v31
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_dual_mov_b32 v79, v39 :: v_dual_mov_b32 v46, v6
+; GCN-NEXT:    v_dual_mov_b32 v45, v5 :: v_dual_mov_b32 v44, v4
+; GCN-NEXT:    v_dual_mov_b32 v43, v3 :: v_dual_mov_b32 v42, v2
+; GCN-NEXT:    v_dual_mov_b32 v41, v1 :: v_dual_mov_b32 v40, v0
+; GCN-NEXT:    v_dual_mov_b32 v54, v14 :: v_dual_mov_b32 v53, v13
+; GCN-NEXT:    v_dual_mov_b32 v52, v12 :: v_dual_mov_b32 v51, v11
+; GCN-NEXT:    v_dual_mov_b32 v50, v10 :: v_dual_mov_b32 v49, v9
+; GCN-NEXT:    v_mov_b32_e32 v48, v8
+; GCN-NEXT:    v_dual_mov_b32 v62, v22 :: v_dual_mov_b32 v61, v21
+; GCN-NEXT:    v_dual_mov_b32 v60, v20 :: v_dual_mov_b32 v59, v19
+; GCN-NEXT:    v_dual_mov_b32 v58, v18 :: v_dual_mov_b32 v57, v17
+; GCN-NEXT:    v_mov_b32_e32 v56, v16
+; GCN-NEXT:    v_dual_mov_b32 v70, v30 :: v_dual_mov_b32 v69, v29
+; GCN-NEXT:    v_dual_mov_b32 v68, v28 :: v_dual_mov_b32 v67, v27
+; GCN-NEXT:    v_dual_mov_b32 v66, v26 :: v_dual_mov_b32 v65, v25
+; GCN-NEXT:    v_mov_b32_e32 v64, v24
+; GCN-NEXT:    v_dual_mov_b32 v78, v38 :: v_dual_mov_b32 v77, v37
+; GCN-NEXT:    v_dual_mov_b32 v76, v36 :: v_dual_mov_b32 v75, v35
+; GCN-NEXT:    v_dual_mov_b32 v74, v34 :: v_dual_mov_b32 v73, v33
+; GCN-NEXT:    v_mov_b32_e32 v72, v32
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[40:47], v[0:7], v[0:7], v[40:47]
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[48:55], v[8:15], v[8:15], v[48:55]
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[56:63], v[16:23], v[16:23], v[56:63]
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[64:71], v[24:31], v[24:31], v[64:71]
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[72:79], v[32:39], v[32:39], v[72:79]
+; GCN-NEXT:    ds_store_b128 v80, v[44:47] offset:16
+; GCN-NEXT:    ds_store_b128 v80, v[40:43]
+; GCN-NEXT:    ds_store_b128 v81, v[52:55] offset:2064
+; GCN-NEXT:    ds_store_b128 v81, v[48:51] offset:2048
+; GCN-NEXT:    ds_store_b128 v81, v[60:63] offset:4112
+; GCN-NEXT:    ds_store_b128 v81, v[56:59] offset:4096
+; GCN-NEXT:    ds_store_b128 v81, v[68:71] offset:6160
+; GCN-NEXT:    ds_store_b128 v81, v[64:67] offset:6144
+; GCN-NEXT:    ds_store_b128 v81, v[76:79] offset:8208
+; GCN-NEXT:    ds_store_b128 v81, v[72:75] offset:8192
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(10) SyncID(0)
+; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_WMMA_cluster:
+; EXACTCUTOFF:       ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT:    v_lshlrev_b32_e32 v40, 5, v0
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_add_nc_u32_e32 v32, s0, v40
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v81, s1 :: v_dual_add_nc_u32 v80, s1, v40
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[4:7], v32 offset:16
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[12:15], v32 offset:2064
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[20:23], v32 offset:6160
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[28:31], v32 offset:12304
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[36:39], v32 offset:20496
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[0:3], v32
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[8:11], v32 offset:2048
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[16:19], v32 offset:6144
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[24:27], v32 offset:12288
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[32:35], v32 offset:20480
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(10) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(4)
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v47, v7
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(3)
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v55, v15
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(2)
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v63, v23
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(1)
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v71, v31
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v79, v39 :: v_dual_mov_b32 v46, v6
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v45, v5 :: v_dual_mov_b32 v44, v4
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v43, v3 :: v_dual_mov_b32 v42, v2
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v41, v1 :: v_dual_mov_b32 v40, v0
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v54, v14 :: v_dual_mov_b32 v53, v13
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v52, v12 :: v_dual_mov_b32 v51, v11
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v50, v10 :: v_dual_mov_b32 v49, v9
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v48, v8
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v62, v22 :: v_dual_mov_b32 v61, v21
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v60, v20 :: v_dual_mov_b32 v59, v19
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v58, v18 :: v_dual_mov_b32 v57, v17
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v56, v16
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v70, v30 :: v_dual_mov_b32 v69, v29
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v68, v28 :: v_dual_mov_b32 v67, v27
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v66, v26 :: v_dual_mov_b32 v65, v25
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v64, v24
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v78, v38 :: v_dual_mov_b32 v77, v37
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v76, v36 :: v_dual_mov_b32 v75, v35
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v74, v34 :: v_dual_mov_b32 v73, v33
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v72, v32
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[40:47], v[0:7], v[0:7], v[40:47]
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[48:55], v[8:15], v[8:15], v[48:55]
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[56:63], v[16:23], v[16:23], v[56:63]
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[64:71], v[24:31], v[24:31], v[64:71]
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[72:79], v[32:39], v[32:39], v[72:79]
+; EXACTCUTOFF-NEXT:    ds_store_b128 v80, v[44:47] offset:16
+; EXACTCUTOFF-NEXT:    ds_store_b128 v80, v[40:43]
+; EXACTCUTOFF-NEXT:    ds_store_b128 v81, v[52:55] offset:2064
+; EXACTCUTOFF-NEXT:    ds_store_b128 v81, v[48:51] offset:2048
+; EXACTCUTOFF-NEXT:    ds_store_b128 v81, v[60:63] offset:4112
+; EXACTCUTOFF-NEXT:    ds_store_b128 v81, v[56:59] offset:4096
+; EXACTCUTOFF-NEXT:    ds_store_b128 v81, v[68:71] offset:6160
+; EXACTCUTOFF-NEXT:    ds_store_b128 v81, v[64:67] offset:6144
+; EXACTCUTOFF-NEXT:    ds_store_b128 v81, v[76:79] offset:8208
+; EXACTCUTOFF-NEXT:    ds_store_b128 v81, v[72:75] offset:8192
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(5) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(10) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_endpgm
+entry:
+  %idx = call i32 @llvm.amdgcn.workitem.id.x()
+  %load.0.addr = getelementptr <16 x half>, ptr addrspace(3) %in, i32 %idx
+  %load.0 = load <16 x half>, ptr addrspace(3) %load.0.addr
+  %load.1.addr = getelementptr <16 x half>, ptr addrspace(3) %load.0.addr, i32 64
+  %load.1 = load <16 x half>, ptr addrspace(3) %load.1.addr
+  %load.2.addr = getelementptr <16 x half>, ptr addrspace(3) %load.1.addr, i32 128
+  %load.2 = load <16 x half>, ptr addrspace(3) %load.2.addr
+  %load.3.addr = getelementptr <16 x half>, ptr addrspace(3) %load.2.addr, i32 192
+  %load.3 = load <16 x half>, ptr addrspace(3) %load.3.addr
+  %load.4.addr = getelementptr <16 x half>, ptr addrspace(3) %load.3.addr, i32 256
+  %load.4 = load <16 x half>, ptr addrspace(3) %load.4.addr
+  %mai.0 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.0, <16 x half> %load.0, <16 x half> %load.0, i1 0)
+  %mai.1 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.1, <16 x half> %load.1, <16 x half> %load.1, i1 0)
+  %mai.2 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.2, <16 x half> %load.2, <16 x half> %load.2, i1 0)
+  %mai.3 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.3, <16 x half> %load.3, <16 x half> %load.3, i1 0)
+  %mai.4 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.4, <16 x half> %load.4, <16 x half> %load.4, i1 0)
+  %store.0.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 %idx
+  store <16 x half> %mai.0, ptr addrspace(3) %store.0.addr
+  %store.1.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 64
+  store <16 x half> %mai.1, ptr addrspace(3) %store.1.addr
+  %store.2.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 128
+  store <16 x half> %mai.2, ptr addrspace(3) %store.2.addr
+  %store.3.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 192
+  store <16 x half> %mai.3, ptr addrspace(3) %store.3.addr
+  %store.4.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 256
+  store <16 x half> %mai.4, ptr addrspace(3) %store.4.addr
+  ; 10 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 10, i32 0)
+  ; 5 WMMA
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 5, i32 0)
+  ; 10 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 10, i32 0)
+  ret void
+}
+
+define amdgpu_kernel void @test_sched_group_barrier_pipeline_WMMA_interleave(ptr addrspace(3) noalias %in, ptr addrspace(3) noalias %out) #0 {
+; GCN-LABEL: test_sched_group_barrier_pipeline_WMMA_interleave:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v16, 5, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_add_nc_u32_e32 v17, s0, v16
+; GCN-NEXT:    v_add_nc_u32_e32 v16, s1, v16
+; GCN-NEXT:    ds_load_b128 v[4:7], v17 offset:16
+; GCN-NEXT:    ds_load_b128 v[0:3], v17
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; GCN-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; GCN-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; GCN-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v16, v[12:15] offset:16
+; GCN-NEXT:    ds_store_b128 v16, v[8:11]
+; GCN-NEXT:    ds_load_b128 v[4:7], v17 offset:2064
+; GCN-NEXT:    ds_load_b128 v[0:3], v17 offset:2048
+; GCN-NEXT:    v_mov_b32_e32 v16, s1
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; GCN-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; GCN-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; GCN-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v16, v[12:15] offset:2064
+; GCN-NEXT:    ds_store_b128 v16, v[8:11] offset:2048
+; GCN-NEXT:    ds_load_b128 v[4:7], v17 offset:6160
+; GCN-NEXT:    ds_load_b128 v[0:3], v17 offset:6144
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; GCN-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; GCN-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; GCN-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v16, v[12:15] offset:4112
+; GCN-NEXT:    ds_store_b128 v16, v[8:11] offset:4096
+; GCN-NEXT:    ds_load_b128 v[4:7], v17 offset:12304
+; GCN-NEXT:    ds_load_b128 v[0:3], v17 offset:12288
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; GCN-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; GCN-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; GCN-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v16, v[12:15] offset:6160
+; GCN-NEXT:    ds_store_b128 v16, v[8:11] offset:6144
+; GCN-NEXT:    ds_load_b128 v[4:7], v17 offset:20496
+; GCN-NEXT:    ds_load_b128 v[0:3], v17 offset:20480
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; GCN-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; GCN-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; GCN-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; GCN-NEXT:    ds_store_b128 v16, v[12:15] offset:8208
+; GCN-NEXT:    ds_store_b128 v16, v[8:11] offset:8192
+; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; GCN-NEXT:    s_endpgm
+;
+; EXACTCUTOFF-LABEL: test_sched_group_barrier_pipeline_WMMA_interleave:
+; EXACTCUTOFF:       ; %bb.0: ; %entry
+; EXACTCUTOFF-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
+; EXACTCUTOFF-NEXT:    v_lshlrev_b32_e32 v16, 5, v0
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_add_nc_u32_e32 v17, s0, v16
+; EXACTCUTOFF-NEXT:    v_add_nc_u32_e32 v16, s1, v16
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[4:7], v17 offset:16
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[0:3], v17
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[12:15] offset:16
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[8:11]
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[4:7], v17 offset:2064
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[0:3], v17 offset:2048
+; EXACTCUTOFF-NEXT:    v_mov_b32_e32 v16, s1
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[12:15] offset:2064
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[8:11] offset:2048
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[4:7], v17 offset:6160
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[0:3], v17 offset:6144
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[12:15] offset:4112
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[8:11] offset:4096
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[4:7], v17 offset:12304
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[0:3], v17 offset:12288
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[12:15] offset:6160
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[8:11] offset:6144
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[4:7], v17 offset:20496
+; EXACTCUTOFF-NEXT:    ds_load_b128 v[0:3], v17 offset:20480
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000100) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_waitcnt lgkmcnt(0)
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v15, v7 :: v_dual_mov_b32 v14, v6
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v13, v5 :: v_dual_mov_b32 v12, v4
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
+; EXACTCUTOFF-NEXT:    v_dual_mov_b32 v9, v1 :: v_dual_mov_b32 v8, v0
+; EXACTCUTOFF-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; EXACTCUTOFF-NEXT:    v_wmma_f16_16x16x16_f16 v[8:15], v[0:7], v[0:7], v[8:15]
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000008) size(1) SyncID(0)
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[12:15] offset:8208
+; EXACTCUTOFF-NEXT:    ds_store_b128 v16, v[8:11] offset:8192
+; EXACTCUTOFF-NEXT:    ; sched_group_barrier mask(0x00000200) size(2) SyncID(0)
+; EXACTCUTOFF-NEXT:    s_endpgm
+entry:
+  %idx = call i32 @llvm.amdgcn.workitem.id.x()
+  %load.0.addr = getelementptr <16 x half>, ptr addrspace(3) %in, i32 %idx
+  %load.0 = load <16 x half>, ptr addrspace(3) %load.0.addr
+  %load.1.addr = getelementptr <16 x half>, ptr addrspace(3) %load.0.addr, i32 64
+  %load.1 = load <16 x half>, ptr addrspace(3) %load.1.addr
+  %load.2.addr = getelementptr <16 x half>, ptr addrspace(3) %load.1.addr, i32 128
+  %load.2 = load <16 x half>, ptr addrspace(3) %load.2.addr
+  %load.3.addr = getelementptr <16 x half>, ptr addrspace(3) %load.2.addr, i32 192
+  %load.3 = load <16 x half>, ptr addrspace(3) %load.3.addr
+  %load.4.addr = getelementptr <16 x half>, ptr addrspace(3) %load.3.addr, i32 256
+  %load.4 = load <16 x half>, ptr addrspace(3) %load.4.addr
+  %mai.0 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.0, <16 x half> %load.0, <16 x half> %load.0, i1 0)
+  %mai.1 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.1, <16 x half> %load.1, <16 x half> %load.1, i1 0)
+  %mai.2 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.2, <16 x half> %load.2, <16 x half> %load.2, i1 0)
+  %mai.3 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.3, <16 x half> %load.3, <16 x half> %load.3, i1 0)
+  %mai.4 = call <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half> %load.4, <16 x half> %load.4, <16 x half> %load.4, i1 0)
+  %store.0.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 %idx
+  store <16 x half> %mai.0, ptr addrspace(3) %store.0.addr
+  %store.1.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 64
+  store <16 x half> %mai.1, ptr addrspace(3) %store.1.addr
+  %store.2.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 128
+  store <16 x half> %mai.2, ptr addrspace(3) %store.2.addr
+  %store.3.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 192
+  store <16 x half> %mai.3, ptr addrspace(3) %store.3.addr
+  %store.4.addr = getelementptr <16 x half>, ptr addrspace(3) %out, i32 256
+  store <16 x half> %mai.4, ptr addrspace(3) %store.4.addr
+  ; 2 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 2, i32 0)
+  ; 1 WMMA
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 2 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 2, i32 0)
+  ; 2 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 2, i32 0)
+  ; 1 WMMA
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 2 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 2, i32 0)
+  ; 2 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 2, i32 0)
+  ; 1 WMMA
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 2 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 2, i32 0)
+  ; 2 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 2, i32 0)
+  ; 1 WMMA
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 2 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 2, i32 0)
+  ; 2 DS read
+  call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 2, i32 0)
+  ; 1 WMMA
+  call void @llvm.amdgcn.sched.group.barrier(i32 8, i32 1, i32 0)
+  ; 2 DS write
+  call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 2, i32 0)
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+declare void @llvm.amdgcn.sched.group.barrier(i32, i32, i32) #1
+declare <16 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16(<16 x half>, <16 x half> , <16 x half>, i1 immarg) #1
+
+attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,32" }
+attributes #1 = { nounwind }
+attributes #2 = { nounwind readnone speculatable }


        


More information about the llvm-commits mailing list