[llvm] 349b6af - [AMDGPU] Remove the assertion for MUBUF instruction with voffset

Christudasan Devadasan via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 15 22:39:56 PST 2022


Author: Christudasan Devadasan
Date: 2022-11-16T12:06:37+05:30
New Revision: 349b6afef47ba34ee3328f7ec6becf1d8c1be2ea

URL: https://github.com/llvm/llvm-project/commit/349b6afef47ba34ee3328f7ec6becf1d8c1be2ea
DIFF: https://github.com/llvm/llvm-project/commit/349b6afef47ba34ee3328f7ec6becf1d8c1be2ea.diff

LOG: [AMDGPU] Remove the assertion for MUBUF instruction with voffset

Currently, an assertion prevents the MUBUF instruction from using
voffset for a VGPR spill inside kernel functions when the frame
pointer is in use. This looks like an unnecessary limitation, so
remove the assertion.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D137892
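
For illustration, here is a minimal, self-contained C++ sketch of the offset-source decision on the non-flat (MUBUF) path of SIRegisterInfo::buildSpillLoadStore after this change. It is not the LLVM API: SpillCtx, its fields, and takesVGPRVOffsetPath are hypothetical stand-ins for the values visible in the diff below (IsFlat, UseVGPROffset, ScratchOffsetReg, FuncInfo->isEntryFunction()).

#include <cassert>
#include <cstdio>

struct SpillCtx {
  bool IsFlat;              // flat-scratch addressing instead of MUBUF
  bool UseVGPROffset;       // large offset is held in a VGPR (voffset/offen)
  bool HasScratchOffsetReg; // an SGPR (e.g. the FP) is available for soffset
  bool IsEntryFunction;     // amdgpu_kernel entry point
};

// True when the spill takes the VGPR-voffset path, with the scratch offset
// register filling the soffset operand.
bool takesVGPRVOffsetPath(const SpillCtx &C) {
  if (C.IsFlat)
    return false;
  if (C.UseVGPROffset && C.HasScratchOffsetReg) {
    // The removed assert(!FuncInfo->isEntryFunction()) used to fire here for
    // kernel spills; they may now take this path as well.
    return true;
  }
  // The fallback MUBUF path still expects an entry function.
  assert(C.IsEntryFunction);
  return false;
}

int main() {
  SpillCtx KernelSpill{/*IsFlat=*/false, /*UseVGPROffset=*/true,
                       /*HasScratchOffsetReg=*/true, /*IsEntryFunction=*/true};
  std::printf("kernel spill takes VGPR voffset path: %s\n",
              takesVGPRVOffsetPath(KernelSpill) ? "yes" : "no");
  return 0;
}

With the assertion gone, a kernel spill whose frame offset is carried in a VGPR simply takes the same path a non-kernel spill already did.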

Added: 
    llvm/test/CodeGen/AMDGPU/kernel-mubuf-with-voffset.mir
    llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll

Modified: 
    llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 249de0d3892b6..3d96d056bbb78 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1594,7 +1594,6 @@ void SIRegisterInfo::buildSpillLoadStore(
     if (SOffset == AMDGPU::NoRegister) {
       if (!IsFlat) {
         if (UseVGPROffset && ScratchOffsetReg) {
-          assert(!FuncInfo->isEntryFunction());
           MIB.addReg(ScratchOffsetReg);
         } else {
           assert(FuncInfo->isEntryFunction());

diff --git a/llvm/test/CodeGen/AMDGPU/kernel-mubuf-with-voffset.mir b/llvm/test/CodeGen/AMDGPU/kernel-mubuf-with-voffset.mir
new file mode 100644
index 0000000000000..7a913cf50ea2b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/kernel-mubuf-with-voffset.mir
@@ -0,0 +1,63 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck %s
+
+# The compiler used to assert when the voffset field was enabled in the MUBUF instruction for a VGPR spill inside
+# a kernel body with the frame pointer enabled. That limitation is now removed and this test should compile without crashing.
+
+--- |
+  define amdgpu_kernel void @kernel_vgpr32_spill() #0 {
+    ret void
+  }
+
+  attributes #0 = { "frame-pointer"="all"}
+...
+---
+name: kernel_vgpr32_spill
+tracksRegLiveness: true
+stack:
+  - { id: 0, type: default, offset: 0, size: 8192, alignment: 8 }
+  - { id: 1, type: spill-slot, offset: 0, size: 4, alignment: 4 }
+
+machineFunctionInfo:
+  isEntryFunction: true
+  scratchRSrcReg:  $sgpr0_sgpr1_sgpr2_sgpr3
+  frameOffsetReg:  $sgpr33
+  stackPtrOffsetReg:  $sgpr32
+  argumentInfo:
+    privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
+body:             |
+  ; CHECK-LABEL: name: kernel_vgpr32_spill
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr18, $sgpr19, $sgpr20, $sgpr21, $sgpr22, $sgpr23, $sgpr24, $sgpr25, $sgpr26, $sgpr27, $sgpr28, $sgpr29, $sgpr30, $sgpr31, $sgpr34, $sgpr35, $sgpr36, $sgpr37, $sgpr38, $sgpr39, $sgpr40, $sgpr41, $sgpr42, $sgpr43, $sgpr44, $sgpr45, $sgpr46, $sgpr47, $sgpr48, $sgpr49, $sgpr50, $sgpr51, $sgpr52, $sgpr53, $sgpr54, $sgpr55, $sgpr56, $sgpr57, $sgpr58, $sgpr59, $sgpr60, $sgpr61, $sgpr62, $sgpr63, $sgpr64, $sgpr65, $sgpr66, $sgpr67, $sgpr68, $sgpr69, $sgpr70, $sgpr71, $sgpr72, $sgpr73, $sgpr74, $sgpr75, $sgpr76, $sgpr77, $sgpr78, $sgpr79, $sgpr80, $sgpr81, $sgpr82, $sgpr83, $sgpr84, $sgpr85, $sgpr86, $sgpr87, $sgpr88, $sgpr89, $sgpr90, $sgpr91, $sgpr92, $sgpr93, $sgpr94, $sgpr95, $sgpr96, $sgpr97, $sgpr98, $sgpr99, $sgpr100, $sgpr101, $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   $sgpr33 = S_MOV_B32 0
+  ; CHECK-NEXT:   $sgpr0 = S_ADD_U32 $sgpr0, $noreg, implicit-def $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr1 = S_ADDC_U32 $sgpr1, 0, implicit-def dead $scc, implicit $scc, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   S_CMP_EQ_U32 0, 0, implicit-def $scc
+  ; CHECK-NEXT:   $vgpr1 = V_MOV_B32_e32 8200, implicit $exec
+  ; CHECK-NEXT:   BUFFER_STORE_DWORD_OFFEN $vgpr0, killed $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr33, 0, 0, 0, implicit $exec :: (store (s32) into %stack.1, addrspace 5)
+  ; CHECK-NEXT:   S_CBRANCH_SCC1 %bb.2, implicit $scc
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   S_NOP 0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   liveins: $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   S_ENDPGM 0
+  bb.0:
+    liveins: $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr18, $sgpr19, $sgpr20, $sgpr21, $sgpr22, $sgpr23, $sgpr24, $sgpr25, $sgpr26, $sgpr27, $sgpr28, $sgpr29, $sgpr30, $sgpr31, $sgpr34, $sgpr35, $sgpr36, $sgpr37, $sgpr38, $sgpr39, $sgpr40, $sgpr41, $sgpr42, $sgpr43, $sgpr44, $sgpr45, $sgpr46, $sgpr47, $sgpr48, $sgpr49, $sgpr50, $sgpr51, $sgpr52, $sgpr53, $sgpr54, $sgpr55, $sgpr56, $sgpr57, $sgpr58, $sgpr59, $sgpr60, $sgpr61, $sgpr62, $sgpr63, $sgpr64, $sgpr65, $sgpr66, $sgpr67, $sgpr68, $sgpr69, $sgpr70, $sgpr71, $sgpr72, $sgpr73, $sgpr74, $sgpr75, $sgpr76, $sgpr77, $sgpr78, $sgpr79, $sgpr80, $sgpr81, $sgpr82, $sgpr83, $sgpr84, $sgpr85, $sgpr86, $sgpr87, $sgpr88, $sgpr89, $sgpr90, $sgpr91, $sgpr92, $sgpr93, $sgpr94, $sgpr95, $sgpr96, $sgpr97, $sgpr98, $sgpr99, $sgpr100, $sgpr101, $vgpr0
+    S_CMP_EQ_U32 0, 0, implicit-def $scc
+    SI_SPILL_V32_SAVE $vgpr0, %stack.1, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.1, align 4, addrspace 5)
+    S_CBRANCH_SCC1 %bb.2, implicit $scc
+
+  bb.1:
+    S_NOP 0
+
+  bb.2:
+    S_ENDPGM 0
+...

diff --git a/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
new file mode 100644
index 0000000000000..f52783690ca80
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/kernel-vgpr-spill-mubuf-with-voffset.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -O0 -verify-machineinstrs %s -o - | FileCheck %s
+
+; The forced spill that preserves the scratch VGPR requires the voffset to hold the large offset
+; value in the MUBUF instruction emitted before s_cbranch_scc1, since materializing the offset with an SGPR add would clobber SCC.
+
+define amdgpu_kernel void @test_kernel(i32 %val) #0 {
+; CHECK-LABEL: test_kernel:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_mov_b32 s32, 0x180000
+; CHECK-NEXT:    s_mov_b32 s33, 0
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s12, s17
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s13, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s17
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_writelane_b32 v40, s16, 0
+; CHECK-NEXT:    s_mov_b32 s13, s15
+; CHECK-NEXT:    s_mov_b32 s12, s14
+; CHECK-NEXT:    v_readlane_b32 s14, v40, 0
+; CHECK-NEXT:    s_mov_b64 s[16:17], s[8:9]
+; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    v_mov_b32_e32 v2, v1
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    s_load_dword s8, s[16:17], 0x0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_writelane_b32 v40, s8, 1
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; def vgpr10
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    s_add_i32 s34, s33, 0x100100
+; CHECK-NEXT:    buffer_store_dword v10, off, s[0:3], s34 ; 4-byte Folded Spill
+; CHECK-NEXT:    s_mov_b64 s[18:19], 8
+; CHECK-NEXT:    s_mov_b32 s8, s16
+; CHECK-NEXT:    s_mov_b32 s9, s17
+; CHECK-NEXT:    s_mov_b32 s16, s18
+; CHECK-NEXT:    s_mov_b32 s15, s19
+; CHECK-NEXT:    s_add_u32 s8, s8, s16
+; CHECK-NEXT:    s_addc_u32 s15, s9, s15
+; CHECK-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9
+; CHECK-NEXT:    s_mov_b32 s9, s15
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0x2000
+; CHECK-NEXT:    ; implicit-def: $sgpr15
+; CHECK-NEXT:    s_getpc_b64 s[16:17]
+; CHECK-NEXT:    s_add_u32 s16, s16, device_func@gotpcrel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s17, s17, device_func@gotpcrel32@hi+12
+; CHECK-NEXT:    s_load_dwordx2 s[16:17], s[16:17], 0x0
+; CHECK-NEXT:    s_mov_b64 s[22:23], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[20:21], s[0:1]
+; CHECK-NEXT:    s_mov_b32 s15, 20
+; CHECK-NEXT:    v_lshlrev_b32_e64 v3, s15, v3
+; CHECK-NEXT:    s_mov_b32 s15, 10
+; CHECK-NEXT:    v_lshlrev_b32_e64 v2, s15, v2
+; CHECK-NEXT:    v_or3_b32 v31, v1, v2, v3
+; CHECK-NEXT:    ; implicit-def: $sgpr15
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[20:21]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT:    s_add_i32 s6, s33, 0x100100
+; CHECK-NEXT:    buffer_load_dword v10, off, s[0:3], s6 ; 4-byte Folded Reload
+; CHECK-NEXT:    v_readlane_b32 s4, v40, 1
+; CHECK-NEXT:    s_mov_b32 s5, 0
+; CHECK-NEXT:    s_cmp_eq_u32 s4, s5
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0x4000
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    buffer_store_dword v10, v0, s[0:3], s33 offen ; 4-byte Folded Spill
+; CHECK-NEXT:    s_cbranch_scc1 .LBB0_2
+; CHECK-NEXT:  ; %bb.1: ; %store
+; CHECK-NEXT:    s_add_i32 s5, s33, 0x100000
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s5 ; 4-byte Folded Reload
+; CHECK-NEXT:    ; implicit-def: $sgpr4
+; CHECK-NEXT:    v_mov_b32_e32 v0, s4
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    ds_write_b32 v0, v1
+; CHECK-NEXT:    s_endpgm
+; CHECK-NEXT:  .LBB0_2: ; %end
+; CHECK-NEXT:    s_endpgm
+  %arr = alloca < 1339 x i32>, align 8192, addrspace(5)
+  %cmp = icmp ne i32 %val, 0
+  %vreg = call i32 asm sideeffect "; def vgpr10", "={v10}"()
+  call void @device_func(<1339 x i32> addrspace(5)* %arr)
+  br i1 %cmp, label %store, label %end
+
+store:
+  store volatile i32 %vreg, i32 addrspace(3)* undef
+  ret void
+
+end:
+  ret void
+}
+
+declare void @device_func(<1339 x i32> addrspace(5)*)
+
+attributes #0 = { nounwind "frame-pointer"="all" }
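
As a follow-up to the comment in kernel-vgpr-spill-mubuf-with-voffset.ll above, here is a small, hypothetical C++ sketch (not LLVM code) of why those spill offsets need a register at all, assuming the usual 12-bit unsigned MUBUF immediate offset field. Offsets such as 8200 (the MIR test) or 0x4000 (the value moved into v0 in the .ll test) overflow the immediate; materializing them with an SGPR add would clobber SCC, which is live into s_cbranch_scc1, so they are held in a VGPR and used through the offen (voffset) operand instead.

#include <cstdio>
#include <initializer_list>

// Assumed 12-bit unsigned MUBUF immediate offset field (0..4095).
constexpr unsigned MUBUFImmMax = 4095;

bool fitsInMUBUFImm(unsigned Offset) { return Offset <= MUBUFImmMax; }

int main() {
  // Spill offsets seen in the two tests above.
  for (unsigned Offset : {8200u, 0x4000u})
    std::printf("offset 0x%x fits the MUBUF immediate: %s\n", Offset,
                fitsInMUBUFImm(Offset) ? "yes" : "no");
  return 0;
}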




More information about the llvm-commits mailing list