[llvm-branch-commits] [llvm] 6a19549 - [AMDGPU] Fix failing assert with scratch ST mode

Sebastian Neubauer via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Tue Jan 12 00:58:27 PST 2021


Author: Sebastian Neubauer
Date: 2021-01-12T09:54:02+01:00
New Revision: 6a195491b6028185c7278718ac21bca309a6c4ea

URL: https://github.com/llvm/llvm-project/commit/6a195491b6028185c7278718ac21bca309a6c4ea
DIFF: https://github.com/llvm/llvm-project/commit/6a195491b6028185c7278718ac21bca309a6c4ea.diff

LOG: [AMDGPU] Fix failing assert with scratch ST mode

In ST mode, flat scratch instructions have neither an sgpr nor a vgpr
for the address. This led to an assertion failure when inserting hard clauses.

Differential Revision: https://reviews.llvm.org/D94406
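
For illustration, drawn from the new test below rather than from new material: in ST mode a scratch
access such as

    scratch_load_dword v0, off, off offset:4

carries "off" in both the saddr and vaddr slots, so getMemOperandsWithOffsetWidth produces an empty list
of base operands, and the unconditional assert at the top of SIInstrInfo::shouldClusterMemOps fired when
the hard-clause pass queried a pair of such instructions.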

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/TargetInstrInfo.h
    llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/test/CodeGen/AMDGPU/memory_clause.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index 1cf205f9f5a3..36afdefd27b2 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1298,10 +1298,11 @@ class TargetInstrInfo : public MCInstrInfo {
                                bool &OffsetIsScalable,
                                const TargetRegisterInfo *TRI) const;
 
-  /// Get the base operands and byte offset of an instruction that reads/writes
-  /// memory.
+  /// Get zero or more base operands and the byte offset of an instruction that
+  /// reads/writes memory. Note that there may be zero base operands if the
+  /// instruction accesses a constant address.
   /// It returns false if MI does not read/write memory.
-  /// It returns false if no base operands and offset was found.
+  /// It returns false if base operands and offset could not be determined.
   /// It is not guaranteed to always recognize base operands and offsets in all
   /// cases.
   virtual bool getMemOperandsWithOffsetWidth(

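A minimal sketch of how a caller might honor the updated contract, assuming the LLVM 12-era signature of
getMemOperandsWithOffsetWidth; describeMemAccess is a hypothetical helper, not part of this commit:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"

    using namespace llvm;

    // Hypothetical caller: distinguishes "not a recognized memory access"
    // (returns false) from "recognized, but with zero base operands"
    // (returns true with BaseOps empty, e.g. a constant address).
    static bool describeMemAccess(const TargetInstrInfo &TII,
                                  const TargetRegisterInfo *TRI,
                                  const MachineInstr &MI) {
      SmallVector<const MachineOperand *, 4> BaseOps;
      int64_t Offset;
      bool OffsetIsScalable;
      unsigned Width;
      if (!TII.getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
                                             OffsetIsScalable, Width, TRI))
        return false; // No memory access, or it could not be analyzed.
      // After this patch, BaseOps.empty() is a legitimate outcome and no
      // longer implies failure; Offset and Width remain meaningful.
      return true;
    }
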
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index eebee8e16bc3..6bf9db3f7b2c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -387,7 +387,7 @@ bool SIInstrInfo::getMemOperandsWithOffsetWidth(
   }
 
   if (isFLAT(LdSt)) {
-    // Instructions have either vaddr or saddr or both.
+    // Instructions have either vaddr or saddr or both or none.
     BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
     if (BaseOp)
       BaseOps.push_back(BaseOp);
@@ -443,11 +443,15 @@ bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                       unsigned NumBytes) const {
   // If the mem ops (to be clustered) do not have the same base ptr, then they
   // should not be clustered
-  assert(!BaseOps1.empty() && !BaseOps2.empty());
-  const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
-  const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
-  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
+  if (!BaseOps1.empty() && !BaseOps2.empty()) {
+    const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
+    const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
+    if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
+      return false;
+  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
+    // If only one base op is empty, they do not have the same base ptr
     return false;
+  }
 
   // In order to avoid register pressure, on average, the number of DWORDS
   // loaded together by all clustered mem ops should not exceed 8. This is an

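The net behavior of the rewritten check: if both instructions have base operands, their base pointers
must match; if exactly one list is empty, the two accesses cannot share a base and are never clustered;
if both lists are empty, both addresses are constant and control falls through to the existing
DWORD-count heuristic. The flat_scratch_load_clause test below exercises that last case, with two
ST-mode scratch loads ending up in a single hard clause:

    s_clause 0x1
    scratch_load_dword v0, off, off offset:4
    scratch_load_dword v1, off, off offset:8
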
diff --git a/llvm/test/CodeGen/AMDGPU/memory_clause.ll b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
index 2c5931ef57b6..154d8e3320ea 100644
--- a/llvm/test/CodeGen/AMDGPU/memory_clause.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=amdgcn -mcpu=gfx902 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=gfx1030 -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SCRATCH %s
 
 define amdgpu_kernel void @vector_clause(<4 x i32> addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture %arg1) {
 ; GCN-LABEL: vector_clause:
@@ -21,6 +22,31 @@ define amdgpu_kernel void @vector_clause(<4 x i32> addrspace(1)* noalias nocaptu
 ; GCN-NEXT:    s_waitcnt vmcnt(3)
 ; GCN-NEXT:    global_store_dwordx4 v16, v[12:15], s[4:5] offset:48
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-SCRATCH-LABEL: vector_clause:
+; GCN-SCRATCH:       ; %bb.0: ; %bb
+; GCN-SCRATCH-NEXT:    s_add_u32 s2, s2, s5
+; GCN-SCRATCH-NEXT:    s_addc_u32 s3, s3, 0
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3
+; GCN-SCRATCH-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN-SCRATCH-NEXT:    v_lshlrev_b32_e32 v16, 4, v0
+; GCN-SCRATCH-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x2c
+; GCN-SCRATCH-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-SCRATCH-NEXT:    s_clause 0x3
+; GCN-SCRATCH-NEXT:    global_load_dwordx4 v[0:3], v16, s[2:3]
+; GCN-SCRATCH-NEXT:    global_load_dwordx4 v[4:7], v16, s[2:3] offset:16
+; GCN-SCRATCH-NEXT:    global_load_dwordx4 v[8:11], v16, s[2:3] offset:32
+; GCN-SCRATCH-NEXT:    global_load_dwordx4 v[12:15], v16, s[2:3] offset:48
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(3)
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v16, v[0:3], s[0:1]
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(2)
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v16, v[4:7], s[0:1] offset:16
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(1)
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v16, v[8:11], s[0:1] offset:32
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v16, v[12:15], s[0:1] offset:48
+; GCN-SCRATCH-NEXT:    s_endpgm
 bb:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
   %tmp2 = zext i32 %tmp to i64
@@ -79,6 +105,45 @@ define amdgpu_kernel void @scalar_clause(<4 x i32> addrspace(1)* noalias nocaptu
 ; GCN-NEXT:    v_mov_b32_e32 v3, s15
 ; GCN-NEXT:    global_store_dwordx4 v12, v[0:3], s[18:19] offset:48
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-SCRATCH-LABEL: scalar_clause:
+; GCN-SCRATCH:       ; %bb.0: ; %bb
+; GCN-SCRATCH-NEXT:    s_add_u32 s2, s2, s5
+; GCN-SCRATCH-NEXT:    s_addc_u32 s3, s3, 0
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3
+; GCN-SCRATCH-NEXT:    s_clause 0x1
+; GCN-SCRATCH-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x24
+; GCN-SCRATCH-NEXT:    s_load_dwordx2 s[16:17], s[0:1], 0x2c
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v16, 0
+; GCN-SCRATCH-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-SCRATCH-NEXT:    s_clause 0x3
+; GCN-SCRATCH-NEXT:    s_load_dwordx4 s[0:3], s[12:13], 0x0
+; GCN-SCRATCH-NEXT:    s_load_dwordx4 s[4:7], s[12:13], 0x10
+; GCN-SCRATCH-NEXT:    s_load_dwordx4 s[8:11], s[12:13], 0x20
+; GCN-SCRATCH-NEXT:    s_load_dwordx4 s[12:15], s[12:13], 0x30
+; GCN-SCRATCH-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v8, s8
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v6, s6
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v7, s7
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v12, s12
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v9, s9
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v10, s10
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v11, s11
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v13, s13
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v14, s14
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v15, s15
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v16, v[0:3], s[16:17]
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v16, v[4:7], s[16:17] offset:16
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v16, v[8:11], s[16:17] offset:32
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v16, v[12:15], s[16:17] offset:48
+; GCN-SCRATCH-NEXT:    s_endpgm
 bb:
   %tmp = load <4 x i32>, <4 x i32> addrspace(1)* %arg, align 16
   %tmp2 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 1
@@ -143,6 +208,30 @@ define void @mubuf_clause(<4 x i32> addrspace(5)* noalias nocapture readonly %ar
 ; GCN-NEXT:    buffer_store_dword v15, v1, s[0:3], 0 offen offset:48
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-SCRATCH-LABEL: mubuf_clause:
+; GCN-SCRATCH:       ; %bb.0: ; %bb
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-SCRATCH-NEXT:    s_waitcnt_vscnt null, 0x0
+; GCN-SCRATCH-NEXT:    v_and_b32_e32 v2, 0x3ff, v2
+; GCN-SCRATCH-NEXT:    v_lshlrev_b32_e32 v18, 4, v2
+; GCN-SCRATCH-NEXT:    v_add_nc_u32_e32 v0, v0, v18
+; GCN-SCRATCH-NEXT:    s_clause 0x3
+; GCN-SCRATCH-NEXT:    scratch_load_dwordx4 v[2:5], v0, off
+; GCN-SCRATCH-NEXT:    scratch_load_dwordx4 v[6:9], v0, off offset:16
+; GCN-SCRATCH-NEXT:    scratch_load_dwordx4 v[10:13], v0, off offset:32
+; GCN-SCRATCH-NEXT:    scratch_load_dwordx4 v[14:17], v0, off offset:48
+; GCN-SCRATCH-NEXT:    v_add_nc_u32_e32 v0, v1, v18
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(3)
+; GCN-SCRATCH-NEXT:    scratch_store_dwordx4 v0, v[2:5], off
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(2)
+; GCN-SCRATCH-NEXT:    scratch_store_dwordx4 v0, v[6:9], off offset:16
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(1)
+; GCN-SCRATCH-NEXT:    scratch_store_dwordx4 v0, v[10:13], off offset:32
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
+; GCN-SCRATCH-NEXT:    scratch_store_dwordx4 v0, v[14:17], off offset:48
+; GCN-SCRATCH-NEXT:    s_waitcnt_vscnt null, 0x0
+; GCN-SCRATCH-NEXT:    s_setpc_b64 s[30:31]
 bb:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
   %tmp2 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg, i32 %tmp
@@ -184,6 +273,28 @@ define amdgpu_kernel void @vector_clause_indirect(i64 addrspace(1)* noalias noca
 ; GCN-NEXT:    s_waitcnt vmcnt(1)
 ; GCN-NEXT:    global_store_dwordx4 v8, v[4:7], s[4:5] offset:16
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-SCRATCH-LABEL: vector_clause_indirect:
+; GCN-SCRATCH:       ; %bb.0: ; %bb
+; GCN-SCRATCH-NEXT:    s_add_u32 s2, s2, s5
+; GCN-SCRATCH-NEXT:    s_addc_u32 s3, s3, 0
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3
+; GCN-SCRATCH-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN-SCRATCH-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GCN-SCRATCH-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-SCRATCH-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-SCRATCH-NEXT:    global_load_dwordx2 v[4:5], v0, s[2:3]
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
+; GCN-SCRATCH-NEXT:    s_clause 0x1
+; GCN-SCRATCH-NEXT:    global_load_dwordx4 v[0:3], v[4:5], off
+; GCN-SCRATCH-NEXT:    global_load_dwordx4 v[4:7], v[4:5], off offset:16
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(1)
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v8, v[0:3], s[0:1]
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
+; GCN-SCRATCH-NEXT:    global_store_dwordx4 v8, v[4:7], s[0:1] offset:16
+; GCN-SCRATCH-NEXT:    s_endpgm
 bb:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
   %tmp3 = zext i32 %tmp to i64
@@ -213,6 +324,21 @@ define void @load_global_d16_hi(i16 addrspace(1)* %in, i16 %reg, <2 x i16> addrs
 ; GCN-NEXT:    global_store_dword v[3:4], v2, off offset:128
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-SCRATCH-LABEL: load_global_d16_hi:
+; GCN-SCRATCH:       ; %bb.0: ; %entry
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-SCRATCH-NEXT:    s_waitcnt_vscnt null, 0x0
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-SCRATCH-NEXT:    s_clause 0x1
+; GCN-SCRATCH-NEXT:    global_load_short_d16_hi v6, v[0:1], off
+; GCN-SCRATCH-NEXT:    global_load_short_d16_hi v2, v[0:1], off offset:64
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(1)
+; GCN-SCRATCH-NEXT:    global_store_dword v[3:4], v6, off
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
+; GCN-SCRATCH-NEXT:    global_store_dword v[3:4], v2, off offset:128
+; GCN-SCRATCH-NEXT:    s_waitcnt_vscnt null, 0x0
+; GCN-SCRATCH-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i64 32
   %load1 = load i16, i16 addrspace(1)* %in
@@ -241,6 +367,21 @@ define void @load_global_d16_lo(i16 addrspace(1)* %in, i32 %reg, <2 x i16> addrs
 ; GCN-NEXT:    global_store_dword v[3:4], v2, off offset:128
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-SCRATCH-LABEL: load_global_d16_lo:
+; GCN-SCRATCH:       ; %bb.0: ; %entry
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-SCRATCH-NEXT:    s_waitcnt_vscnt null, 0x0
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-SCRATCH-NEXT:    s_clause 0x1
+; GCN-SCRATCH-NEXT:    global_load_short_d16 v6, v[0:1], off
+; GCN-SCRATCH-NEXT:    global_load_short_d16 v2, v[0:1], off offset:64
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(1)
+; GCN-SCRATCH-NEXT:    global_store_dword v[3:4], v6, off
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
+; GCN-SCRATCH-NEXT:    global_store_dword v[3:4], v2, off offset:128
+; GCN-SCRATCH-NEXT:    s_waitcnt_vscnt null, 0x0
+; GCN-SCRATCH-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i64 32
   %reg.bc1 = bitcast i32 %reg to <2 x i16>
@@ -255,4 +396,141 @@ entry:
   ret void
 }
 
+define amdgpu_kernel void @flat_scratch_load(float %a, float %b, <8 x i32> %desc) {
+; GCN-LABEL: flat_scratch_load:
+; GCN:       ; %bb.0: ; %.entry
+; GCN-NEXT:    s_mov_b32 s16, SCRATCH_RSRC_DWORD0
+; GCN-NEXT:    s_mov_b32 s17, SCRATCH_RSRC_DWORD1
+; GCN-NEXT:    s_mov_b32 s18, -1
+; GCN-NEXT:    s_mov_b32 s19, 0xe00000
+; GCN-NEXT:    s_add_u32 s16, s16, s3
+; GCN-NEXT:    s_addc_u32 s17, s17, 0
+; GCN-NEXT:    s_mov_b64 s[12:13], exec
+; GCN-NEXT:    s_wqm_b64 exec, exec
+; GCN-NEXT:    v_mov_b32_e32 v0, 0x40b00000
+; GCN-NEXT:    s_load_dwordx2 s[14:15], s[0:1], 0x24
+; GCN-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x44
+; GCN-NEXT:    buffer_store_dword v0, off, s[16:19], 0 offset:4
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    ;;#ASMSTART
+; GCN-NEXT:    ;;#ASMEND
+; GCN-NEXT:    buffer_load_dword v2, off, s[16:19], 0 offset:4
+; GCN-NEXT:    s_brev_b32 s0, 1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-NEXT:    s_mov_b32 s3, 0
+; GCN-NEXT:    s_mov_b32 s1, s0
+; GCN-NEXT:    s_mov_b32 s2, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s15
+; GCN-NEXT:    s_and_b64 exec, exec, s[12:13]
+; GCN-NEXT:    image_sample v0, v[0:1], s[4:11], s[0:3] dmask:0x1
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_f32_e32 v0, v2, v0
+; GCN-NEXT:    exp mrt0 v0, off, off, off done vm
+; GCN-NEXT:    s_endpgm
+;
+; GCN-SCRATCH-LABEL: flat_scratch_load:
+; GCN-SCRATCH:       ; %bb.0: ; %.entry
+; GCN-SCRATCH-NEXT:    s_add_u32 s2, s2, s5
+; GCN-SCRATCH-NEXT:    s_addc_u32 s3, s3, 0
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3
+; GCN-SCRATCH-NEXT:    s_mov_b32 s9, exec_lo
+; GCN-SCRATCH-NEXT:    s_wqm_b32 exec_lo, exec_lo
+; GCN-SCRATCH-NEXT:    s_clause 0x1
+; GCN-SCRATCH-NEXT:    s_load_dwordx2 s[10:11], s[0:1], 0x24
+; GCN-SCRATCH-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x44
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v0, 0x40b00000
+; GCN-SCRATCH-NEXT:    s_brev_b32 s8, 1
+; GCN-SCRATCH-NEXT:    scratch_store_dword off, v0, off offset:4
+; GCN-SCRATCH-NEXT:    s_waitcnt_vscnt null, 0x0
+; GCN-SCRATCH-NEXT:    ;;#ASMSTART
+; GCN-SCRATCH-NEXT:    ;;#ASMEND
+; GCN-SCRATCH-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-SCRATCH-NEXT:    s_and_b32 exec_lo, exec_lo, s9
+; GCN-SCRATCH-NEXT:    s_mov_b32 s11, 0
+; GCN-SCRATCH-NEXT:    s_mov_b32 s9, s8
+; GCN-SCRATCH-NEXT:    s_mov_b32 s10, s8
+; GCN-SCRATCH-NEXT:    scratch_load_dword v2, off, off offset:4
+; GCN-SCRATCH-NEXT:    image_sample v0, v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
+; GCN-SCRATCH-NEXT:    v_add_f32_e32 v0, v2, v0
+; GCN-SCRATCH-NEXT:    exp mrt0 v0, off, off, off done vm
+; GCN-SCRATCH-NEXT:    s_endpgm
+.entry:
+  %alloca = alloca float, align 4, addrspace(5)
+  store volatile float 5.5, float addrspace(5)* %alloca
+  call void asm sideeffect "", ""()
+  ; There was a bug with flat scratch instructions that do not use any address registers (ST mode).
+  ; To trigger, the scratch_load has to be immediately before the image_sample in MIR.
+  %load = load float, float addrspace(5)* %alloca
+  %val = call <2 x float> @llvm.amdgcn.image.sample.2d.v2f32.f32(i32 9, float %a, float %b, <8 x i32> %desc, <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 0>, i1 false, i32 0, i32 0)
+  %val0 = extractelement <2 x float> %val, i32 0
+  %valadd = fadd float %load, %val0
+  call void @llvm.amdgcn.exp.f32(i32 immarg 0, i32 immarg 1, float %valadd, float undef, float undef, float undef, i1 immarg true, i1 immarg true)
+  ret void
+}
+
+define amdgpu_kernel void @flat_scratch_load_clause(float %a, float %b, <8 x i32> %desc) {
+; GCN-LABEL: flat_scratch_load_clause:
+; GCN:       ; %bb.0: ; %.entry
+; GCN-NEXT:    s_mov_b32 s4, SCRATCH_RSRC_DWORD0
+; GCN-NEXT:    s_mov_b32 s5, SCRATCH_RSRC_DWORD1
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_mov_b32 s7, 0xe00000
+; GCN-NEXT:    s_add_u32 s4, s4, s3
+; GCN-NEXT:    s_addc_u32 s5, s5, 0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0x40b00000
+; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0 offset:4
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, 0x40d00000
+; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0 offset:8
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    ;;#ASMSTART
+; GCN-NEXT:    ;;#ASMEND
+; GCN-NEXT:    buffer_load_dword v0, off, s[4:7], 0 offset:4
+; GCN-NEXT:    buffer_load_dword v1, off, s[4:7], 0 offset:8
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_f32_e32 v0, v0, v1
+; GCN-NEXT:    exp mrt0 v0, off, off, off done vm
+; GCN-NEXT:    s_endpgm
+;
+; GCN-SCRATCH-LABEL: flat_scratch_load_clause:
+; GCN-SCRATCH:       ; %bb.0: ; %.entry
+; GCN-SCRATCH-NEXT:    s_add_u32 s2, s2, s5
+; GCN-SCRATCH-NEXT:    s_addc_u32 s3, s3, 0
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2
+; GCN-SCRATCH-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v0, 0x40b00000
+; GCN-SCRATCH-NEXT:    v_mov_b32_e32 v1, 0x40d00000
+; GCN-SCRATCH-NEXT:    scratch_store_dword off, v0, off offset:4
+; GCN-SCRATCH-NEXT:    s_waitcnt_vscnt null, 0x0
+; GCN-SCRATCH-NEXT:    scratch_store_dword off, v1, off offset:8
+; GCN-SCRATCH-NEXT:    s_waitcnt_vscnt null, 0x0
+; GCN-SCRATCH-NEXT:    ;;#ASMSTART
+; GCN-SCRATCH-NEXT:    ;;#ASMEND
+; GCN-SCRATCH-NEXT:    s_clause 0x1
+; GCN-SCRATCH-NEXT:    scratch_load_dword v0, off, off offset:4
+; GCN-SCRATCH-NEXT:    scratch_load_dword v1, off, off offset:8
+; GCN-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
+; GCN-SCRATCH-NEXT:    v_add_f32_e32 v0, v0, v1
+; GCN-SCRATCH-NEXT:    exp mrt0 v0, off, off, off done vm
+; GCN-SCRATCH-NEXT:    s_endpgm
+.entry:
+  %alloca = alloca float, align 4, addrspace(5)
+  %alloca2 = alloca float, align 4, addrspace(5)
+  store volatile float 5.5, float addrspace(5)* %alloca
+  store volatile float 6.5, float addrspace(5)* %alloca2
+  call void asm sideeffect "", ""()
+  %load0 = load float, float addrspace(5)* %alloca
+  %load1 = load float, float addrspace(5)* %alloca2
+  %valadd = fadd float %load0, %load1
+  call void @llvm.amdgcn.exp.f32(i32 immarg 0, i32 immarg 1, float %valadd, float undef, float undef, float undef, i1 immarg true, i1 immarg true)
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x()
+declare void @llvm.amdgcn.exp.f32(i32 immarg, i32 immarg, float, float, float, float, i1 immarg, i1 immarg)
+declare <2 x float> @llvm.amdgcn.image.sample.2d.v2f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg)
