[llvm] ab593ba - AMDGPU: Add base test for future optimization patch

Matt Arsenault via llvm-commits llvm-commits@lists.llvm.org
Wed Jan 12 15:57:45 PST 2022


Author: Matt Arsenault
Date: 2022-01-12T18:21:23-05:00
New Revision: ab593baa2dc1361b0d82fec13487ae0393e0908d

URL: https://github.com/llvm/llvm-project/commit/ab593baa2dc1361b0d82fec13487ae0393e0908d
DIFF: https://github.com/llvm/llvm-project/commit/ab593baa2dc1361b0d82fec13487ae0393e0908d.diff

LOG: AMDGPU: Add base test for future optimization patch

Added: 
    llvm/test/CodeGen/AMDGPU/call-reqd-group-size.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/call-reqd-group-size.ll b/llvm/test/CodeGen/AMDGPU/call-reqd-group-size.ll
new file mode 100644
index 000000000000..30339476bce8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/call-reqd-group-size.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck %s
+
+; Check for optimizing the passed implicit workitem ID based on the
+; required group size. This should avoid a few bit packing operations.
+
+declare hidden void @callee() #0
+
+define amdgpu_kernel void @known_x_0(i32 addrspace(1)* %out) !reqd_work_group_size !0 {
+; CHECK-LABEL: known_x_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_y_0(i32 addrspace(1)* %out) !reqd_work_group_size !1 {
+; CHECK-LABEL: known_y_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_z_0(i32 addrspace(1)* %out) !reqd_work_group_size !2 {
+; CHECK-LABEL: known_z_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_yz_0(i32 addrspace(1)* %out) !reqd_work_group_size !3 {
+; CHECK-LABEL: known_yz_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_xz_0(i32 addrspace(1)* %out) !reqd_work_group_size !4 {
+; CHECK-LABEL: known_xz_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+define amdgpu_kernel void @known_xyz_0(i32 addrspace(1)* %out) !reqd_work_group_size !5 {
+; CHECK-LABEL: known_xyz_0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s6, s9
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s7, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s9
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; CHECK-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    v_or3_b32 v31, v0, v1, v2
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee@rel32@hi+12
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+  call void @callee()
+  ret void
+}
+
+attributes #0 = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" }
+
+!0 = !{i32 1, i32 64, i32 64}
+!1 = !{i32 64, i32 1, i32 64}
+!2 = !{i32 64, i32 64, i32 1}
+!3 = !{i32 64, i32 1, i32 1}
+!4 = !{i32 1, i32 64, i32 1}
+!5 = !{i32 1, i32 1, i32 1}


        


More information about the llvm-commits mailing list