[llvm] [AMDGPU] Fix an issue where the wrong index is used in the byte provider calculation when the op is extract_vector_elt (PR #91697)

Shilei Tian via llvm-commits llvm-commits at lists.llvm.org
Fri May 10 11:26:35 PDT 2024


================
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+
+define protected amdgpu_kernel void @test(ptr addrspace(1) %srcA, ptr addrspace(1) %srcB, ptr addrspace(1) %dst) {
+; GFX9-LABEL: test:
+; GFX9:       ; %bb.0: ; %entry
+; GFX9-NEXT:    s_load_dword s7, s[4:5], 0x24
+; GFX9-NEXT:    s_load_dword s8, s[4:5], 0x40
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x10
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_and_b32 s4, s7, 0xffff
+; GFX9-NEXT:    s_mul_i32 s6, s6, s4
+; GFX9-NEXT:    s_add_i32 s8, s8, s6
+; GFX9-NEXT:    v_add_u32_e32 v0, s8, v0
+; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GFX9-NEXT:    v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v4
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-NEXT:    global_load_dwordx4 v[0:3], v[0:1], off
+; GFX9-NEXT:    s_mov_b32 s0, 0x3020504
+; GFX9-NEXT:    v_mov_b32_e32 v6, s3
+; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, s2, v4
+; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, v6, v5, vcc
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_perm_b32 v2, v2, v2, s0
+; GFX9-NEXT:    v_perm_b32 v0, v0, v0, s0
+; GFX9-NEXT:    v_not_b32_e32 v3, v3
+; GFX9-NEXT:    v_not_b32_e32 v1, v1
+; GFX9-NEXT:    v_not_b32_e32 v2, v2
+; GFX9-NEXT:    v_not_b32_e32 v0, v0
+; GFX9-NEXT:    global_store_dwordx4 v[4:5], v[0:3], off
+; GFX9-NEXT:    s_endpgm
+;
+; GFX10-LABEL: test:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_clause 0x1
+; GFX10-NEXT:    s_load_dword s0, s[4:5], 0x24
+; GFX10-NEXT:    s_load_dword s2, s[4:5], 0x40
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_and_b32 s3, s0, 0xffff
+; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX10-NEXT:    s_mul_i32 s6, s6, s3
+; GFX10-NEXT:    v_add3_u32 v0, s2, s6, v0
+; GFX10-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x10
+; GFX10-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GFX10-NEXT:    v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, s0, v4
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v4, vcc_lo, s2, v4
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
+; GFX10-NEXT:    global_load_dwordx4 v[0:3], v[0:1], off
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_perm_b32 v2, v2, v2, 0x3020504
+; GFX10-NEXT:    v_perm_b32 v0, v0, v0, 0x3020504
+; GFX10-NEXT:    v_not_b32_e32 v3, v3
+; GFX10-NEXT:    v_not_b32_e32 v1, v1
+; GFX10-NEXT:    v_not_b32_e32 v2, v2
+; GFX10-NEXT:    v_not_b32_e32 v0, v0
+; GFX10-NEXT:    global_store_dwordx4 v[4:5], v[0:3], off
+; GFX10-NEXT:    s_endpgm
+;
+; GFX11-LABEL: test:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_clause 0x1
+; GFX11-NEXT:    s_load_b32 s2, s[0:1], 0x24
+; GFX11-NEXT:    s_load_b32 s4, s[0:1], 0x40
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_and_b32 s5, s2, 0xffff
+; GFX11-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX11-NEXT:    s_mul_i32 s15, s15, s5
+; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x10
+; GFX11-NEXT:    v_add3_u32 v0, s4, s15, v0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GFX11-NEXT:    v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, s2, v4
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, s3, v5, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v4, vcc_lo, s0, v4
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v5, vcc_lo, s1, v5, vcc_lo
+; GFX11-NEXT:    global_load_b128 v[0:3], v[0:1], off
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_perm_b32 v2, v2, v2, 0x3020504
+; GFX11-NEXT:    v_perm_b32 v0, v0, v0, 0x3020504
+; GFX11-NEXT:    v_not_b32_e32 v3, v3
+; GFX11-NEXT:    v_not_b32_e32 v1, v1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_not_b32_e32 v2, v2
+; GFX11-NEXT:    v_not_b32_e32 v0, v0
+; GFX11-NEXT:    global_store_b128 v[4:5], v[0:3], off
+; GFX11-NEXT:    s_nop 0
+; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT:    s_endpgm
+entry:
+  %test.kernarg.segment = call nonnull align 16 dereferenceable(280) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+  %srcA.kernarg.offset4 = bitcast ptr addrspace(4) %test.kernarg.segment to ptr addrspace(4)
+  %srcA.load = load ptr addrspace(1), ptr addrspace(4) %srcA.kernarg.offset4, align 16
+  %dst.kernarg.offset = getelementptr inbounds i8, ptr addrspace(4) %test.kernarg.segment, i64 16
+  %dst.load = load ptr addrspace(1), ptr addrspace(4) %dst.kernarg.offset, align 16
+  %0 = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
----------------
shiltian wrote:

I reduced it further. This looks like the minimal version that can still produce the `v_perm_b32` instruction.
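
For readers following along, here is a minimal sketch of the shape of IR involved (hypothetical, not the committed test): a byte swizzle built from shifts, masks, and `or`s on an element extracted from a loaded vector. The or-of-shifted-bytes pattern is what the byte-provider analysis traces, here through an `extract_vector_elt` node, and on AMDGPU it can be matched into a single `v_perm_b32`, with the inversion kept as a separate `v_not_b32`:

```llvm
; Hypothetical sketch, not the test from this PR: swap the bytes within
; each halfword of an element extracted from a loaded vector, then
; invert the result.
define i32 @swizzle_elt(ptr addrspace(1) %p) {
entry:
  %v = load <4 x i32>, ptr addrspace(1) %p, align 16
  %e = extractelement <4 x i32> %v, i32 2
  %shl = shl i32 %e, 8
  %hi = and i32 %shl, -16711936    ; 0xFF00FF00
  %shr = lshr i32 %e, 8
  %lo = and i32 %shr, 16711935     ; 0x00FF00FF
  %swz = or i32 %hi, %lo           ; byte swizzle, candidate for v_perm_b32
  %r = xor i32 %swz, -1            ; lowers to v_not_b32
  ret i32 %r
}
```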

https://github.com/llvm/llvm-project/pull/91697

