[llvm] 0aa9a7f - [AMDGPU] Regenerate bfe-combine.ll and bfe-patterns.ll

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Mon Oct 17 06:41:32 PDT 2022


Author: Simon Pilgrim
Date: 2022-10-17T14:41:14+01:00
New Revision: 0aa9a7f8d93707c7a5f219ceedc8e191235bc84e

URL: https://github.com/llvm/llvm-project/commit/0aa9a7f8d93707c7a5f219ceedc8e191235bc84e
DIFF: https://github.com/llvm/llvm-project/commit/0aa9a7f8d93707c7a5f219ceedc8e191235bc84e.diff

LOG: [AMDGPU] Regenerate bfe-combine.ll and bfe-patterns.ll
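
The regenerated CHECK lines below come from utils/update_llc_test_checks.py
(named in the NOTE line at the top of each test). As a minimal sketch, assuming
an llvm-project checkout with llc built under ./build, the invocation looks
like:

    # Rewrite each test's CHECK lines from its RUN lines, using the built llc.
    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/AMDGPU/bfe-combine.ll \
        llvm/test/CodeGen/AMDGPU/bfe-patterns.ll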

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/bfe-combine.ll
    llvm/test/CodeGen/AMDGPU/bfe-patterns.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AMDGPU/bfe-combine.ll b/llvm/test/CodeGen/AMDGPU/bfe-combine.ll
index 04d67abf0559..92bac604a6b6 100644
--- a/llvm/test/CodeGen/AMDGPU/bfe-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfe-combine.ll
@@ -1,18 +1,63 @@
-; RUN: llc -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole=0 < %s | FileCheck --check-prefix=GCN --check-prefix=VI %s
-; RUN: llc -march=amdgcn -mcpu=fiji < %s | FileCheck --check-prefix=GCN --check-prefix=VI-SDWA %s
-; RUN: llc -march=amdgcn -mcpu=bonaire < %s | FileCheck --check-prefix=GCN --check-prefix=CI %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole=0 < %s | FileCheck %s --check-prefixes=VI
+; RUN: llc -march=amdgcn -mcpu=fiji < %s | FileCheck %s --check-prefixes=VI-SDWA
+; RUN: llc -march=amdgcn -mcpu=bonaire < %s | FileCheck %s --check-prefixes=CI
 
-; GCN-LABEL: {{^}}bfe_combine8:
-; VI: v_bfe_u32 v[[BFE:[0-9]+]], v{{[0-9]+}}, 8, 8
-; VI: v_lshlrev_b32_e32 v[[ADDRBASE:[0-9]+]], 2, v[[BFE]]
-; VI-SDWA: v_mov_b32_e32 v[[SHIFT:[0-9]+]], 2
-; VI-SDWA: v_lshlrev_b32_sdwa v[[ADDRBASE:[0-9]+]], v[[SHIFT]], v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; CI: v_lshrrev_b32_e32 v[[SHR:[0-9]+]], 6, v{{[0-9]+}}
-; CI: v_and_b32_e32 v[[ADDRLO:[0-9]+]], 0x3fc, v[[SHR]]
-; VI: v_add_u32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[ADDRBASE]]
-; VI-SDWA: v_add_u32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[ADDRBASE]]
-; GCN: load_dword v{{[0-9]+}}, v[[[ADDRLO]]:
 define amdgpu_kernel void @bfe_combine8(i32 addrspace(1)* nocapture %arg, i32 %x) {
+; VI-LABEL: bfe_combine8:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; VI-NEXT:    v_bfe_u32 v0, v0, 8, 8
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v2, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; VI-SDWA-LABEL: bfe_combine8:
+; VI-SDWA:       ; %bb.0:
+; VI-SDWA-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-SDWA-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-SDWA-NEXT:    v_mov_b32_e32 v1, 2
+; VI-SDWA-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-SDWA-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; VI-SDWA-NEXT:    v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
+; VI-SDWA-NEXT:    v_mov_b32_e32 v1, s1
+; VI-SDWA-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-SDWA-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SDWA-NEXT:    flat_load_dword v2, v[0:1]
+; VI-SDWA-NEXT:    v_mov_b32_e32 v0, s0
+; VI-SDWA-NEXT:    v_mov_b32_e32 v1, s1
+; VI-SDWA-NEXT:    s_waitcnt vmcnt(0)
+; VI-SDWA-NEXT:    flat_store_dword v[0:1], v2
+; VI-SDWA-NEXT:    s_endpgm
+;
+; CI-LABEL: bfe_combine8:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; CI-NEXT:    s_mov_b32 s3, 0xf000
+; CI-NEXT:    s_mov_b32 s6, 0
+; CI-NEXT:    s_mov_b32 s7, s3
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
+; CI-NEXT:    v_lshrrev_b32_e32 v0, 6, v0
+; CI-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; CI-NEXT:    v_and_b32_e32 v0, 0x3fc, v0
+; CI-NEXT:    v_mov_b32_e32 v1, 0
+; CI-NEXT:    buffer_load_dword v0, v[0:1], s[4:7], 0 addr64
+; CI-NEXT:    s_mov_b32 s2, -1
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; CI-NEXT:    s_endpgm
   %id = tail call i32 @llvm.amdgcn.workitem.id.x() #2
   %idx = add i32 %x, %id
   %srl = lshr i32 %idx, 8
@@ -23,19 +68,66 @@ define amdgpu_kernel void @bfe_combine8(i32 addrspace(1)* nocapture %arg, i32 %x
   ret void
 }
 
-; GCN-LABEL: {{^}}bfe_combine16:
-; VI: v_bfe_u32 v[[BFE:[0-9]+]], v{{[0-9]+}}, 16, 16
-; VI: v_lshlrev_b32_e32 v[[ADDRBASE:[0-9]+]], {{[^,]+}}, v[[BFE]]
-; VI-SDWA: v_mov_b32_e32 v[[SHIFT:[0-9]+]], 15
-; VI-SDWA: v_lshlrev_b32_sdwa v[[ADDRBASE1:[0-9]+]], v[[SHIFT]], v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-SDWA: v_lshlrev_b64 v[[[ADDRBASE:[0-9]+]]:{{[^\]+}}], 2, v[[[ADDRBASE1]]:{{[^\]+}}]
-; VI-SDWA: v_add_u32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[ADDRBASE]]
-; CI: v_lshrrev_b32_e32 v[[SHR:[0-9]+]], 1, v{{[0-9]+}}
-; CI: v_and_b32_e32 v[[AND:[0-9]+]], 0x7fff8000, v[[SHR]]
-; CI: v_lshl_b64 v[[[ADDRLO:[0-9]+]]:{{[^\]+}}], v[[[AND]]:{{[^\]+}}], 2
-; VI: v_add_u32_e32 v[[ADDRLO:[0-9]+]], vcc, s{{[0-9]+}}, v[[ADDRBASE]]
-; GCN: load_dword v{{[0-9]+}}, v[[[ADDRLO]]:
 define amdgpu_kernel void @bfe_combine16(i32 addrspace(1)* nocapture %arg, i32 %x) {
+; VI-LABEL: bfe_combine16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; VI-NEXT:    v_bfe_u32 v0, v0, 16, 16
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 15, v0
+; VI-NEXT:    v_lshlrev_b64 v[0:1], 2, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; VI-NEXT:    flat_load_dword v2, v[0:1]
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+;
+; VI-SDWA-LABEL: bfe_combine16:
+; VI-SDWA:       ; %bb.0:
+; VI-SDWA-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; VI-SDWA-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-SDWA-NEXT:    v_mov_b32_e32 v1, 15
+; VI-SDWA-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-SDWA-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
+; VI-SDWA-NEXT:    v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-SDWA-NEXT:    v_mov_b32_e32 v1, 0
+; VI-SDWA-NEXT:    v_lshlrev_b64 v[0:1], 2, v[0:1]
+; VI-SDWA-NEXT:    v_mov_b32_e32 v2, s1
+; VI-SDWA-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-SDWA-NEXT:    v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; VI-SDWA-NEXT:    flat_load_dword v2, v[0:1]
+; VI-SDWA-NEXT:    v_mov_b32_e32 v0, s0
+; VI-SDWA-NEXT:    v_mov_b32_e32 v1, s1
+; VI-SDWA-NEXT:    s_waitcnt vmcnt(0)
+; VI-SDWA-NEXT:    flat_store_dword v[0:1], v2
+; VI-SDWA-NEXT:    s_endpgm
+;
+; CI-LABEL: bfe_combine16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dword s2, s[0:1], 0xb
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; CI-NEXT:    v_mov_b32_e32 v1, 0
+; CI-NEXT:    s_mov_b32 s3, 0xf000
+; CI-NEXT:    s_mov_b32 s6, 0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
+; CI-NEXT:    v_lshrrev_b32_e32 v0, 1, v0
+; CI-NEXT:    v_and_b32_e32 v0, 0x7fff8000, v0
+; CI-NEXT:    v_lshl_b64 v[0:1], v[0:1], 2
+; CI-NEXT:    s_mov_b32 s7, s3
+; CI-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; CI-NEXT:    buffer_load_dword v0, v[0:1], s[4:7], 0 addr64
+; CI-NEXT:    s_mov_b32 s2, -1
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; CI-NEXT:    s_endpgm
   %id = tail call i32 @llvm.amdgcn.workitem.id.x() #2
   %idx = add i32 %x, %id
   %srl = lshr i32 %idx, 1

diff  --git a/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll b/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll
index 95ec67906f11..f7c2d69706bd 100644
--- a/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll
@@ -1,11 +1,44 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
 
-; GCN-LABEL: {{^}}v_ubfe_sub_i32:
-; GCN: {{buffer|flat}}_load_dword [[SRC:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[WIDTH:v[0-9]+]]
-; GCN: v_bfe_u32 v{{[0-9]+}}, [[SRC]], 0, [[WIDTH]]
 define amdgpu_kernel void @v_ubfe_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #1 {
+; SI-LABEL: v_ubfe_sub_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    v_bfe_u32 v2, v2, 0, v3
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_ubfe_sub_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_bfe_u32 v2, v3, 0, v4
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
   %in0.gep = getelementptr i32, i32 addrspace(1)* %in0, i32 %id.x
   %in1.gep = getelementptr i32, i32 addrspace(1)* %in1, i32 %id.x
@@ -19,17 +52,52 @@ define amdgpu_kernel void @v_ubfe_sub_i32(i32 addrspace(1)* %out, i32 addrspace(
   ret void
 }
 
-; GCN-LABEL: {{^}}v_ubfe_sub_multi_use_shl_i32:
-; GCN: {{buffer|flat}}_load_dword [[SRC:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[WIDTH:v[0-9]+]]
-; GCN: v_sub_{{[iu]}}32_e32 [[SUB:v[0-9]+]], vcc, 32, [[WIDTH]]
-
-; GCN-NEXT: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], [[SUB]], [[SRC]]
-; GCN-NEXT: v_lshrrev_b32_e32 [[BFE:v[0-9]+]], [[SUB]], [[SHL]]
-
-; GCN: [[BFE]]
-; GCN: [[SHL]]
 define amdgpu_kernel void @v_ubfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #1 {
+; SI-LABEL: v_ubfe_sub_multi_use_shl_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_sub_i32_e32 v3, vcc, 32, v3
+; SI-NEXT:    v_lshlrev_b32_e32 v2, v3, v2
+; SI-NEXT:    v_lshrrev_b32_e32 v3, v3, v2
+; SI-NEXT:    buffer_store_dword v3, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    buffer_store_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_ubfe_sub_multi_use_shl_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_sub_u32_e32 v2, vcc, 32, v4
+; VI-NEXT:    v_lshlrev_b32_e32 v3, v2, v3
+; VI-NEXT:    v_lshrrev_b32_e32 v2, v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
   %in0.gep = getelementptr i32, i32 addrspace(1)* %in0, i32 %id.x
   %in1.gep = getelementptr i32, i32 addrspace(1)* %in1, i32 %id.x
@@ -44,12 +112,37 @@ define amdgpu_kernel void @v_ubfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out,
   ret void
 }
 
-; GCN-LABEL: {{^}}s_ubfe_sub_i32:
-; GCN: s_load_dwordx4 s[[[#LOAD:]]:[[END:[0-9]+]]], s[0:1], {{0x9|0x24}}
-; GCN: s_sub_i32 [[SUB:s[0-9]+]], 32, s[[#LOAD + 3]]
-; GCN: s_lshl_b32 [[TMP:s[0-9]+]], s[[#LOAD + 2]], [[SUB]]
-; GCN: s_lshr_b32 s{{[0-9]+}}, [[TMP]], [[SUB]]
 define amdgpu_kernel void @s_ubfe_sub_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 {
+; SI-LABEL: s_ubfe_sub_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_sub_i32 s3, 32, s3
+; SI-NEXT:    s_lshl_b32 s2, s2, s3
+; SI-NEXT:    s_lshr_b32 s2, s2, s3
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_ubfe_sub_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    s_sub_i32 s0, 32, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_lshl_b32 s1, s2, s0
+; VI-NEXT:    s_lshr_b32 s0, s1, s0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
   %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
   %sub = sub i32 32, %width
@@ -59,12 +152,44 @@ define amdgpu_kernel void @s_ubfe_sub_i32(i32 addrspace(1)* %out, i32 %src, i32
   ret void
 }
 
-; GCN-LABEL: {{^}}s_ubfe_sub_multi_use_shl_i32:
-; GCN: s_load_dwordx4 s[[[#LOAD:]]:[[END:[0-9]+]]], s[0:1], {{0x9|0x24}}
-; GCN: s_sub_i32 [[SUB:s[0-9]+]], 32, s[[#LOAD + 3]]
-; GCN: s_lshl_b32 [[SHL:s[0-9]+]], s[[#LOAD + 2]], [[SUB]]
-; GCN: s_lshr_b32 s{{[0-9]+}}, [[SHL]], [[SUB]]
 define amdgpu_kernel void @s_ubfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 {
+; SI-LABEL: s_ubfe_sub_multi_use_shl_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_sub_i32 s3, 32, s3
+; SI-NEXT:    s_lshl_b32 s2, s2, s3
+; SI-NEXT:    s_lshr_b32 s3, s2, s3
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_ubfe_sub_multi_use_shl_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    s_sub_i32 s0, 32, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_lshl_b32 s1, s2, s0
+; VI-NEXT:    s_lshr_b32 s0, s1, s0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    flat_store_dword v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
   %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
   %sub = sub i32 32, %width
@@ -75,11 +200,43 @@ define amdgpu_kernel void @s_ubfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out,
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sbfe_sub_i32:
-; GCN: {{buffer|flat}}_load_dword [[SRC:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[WIDTH:v[0-9]+]]
-; GCN: v_bfe_i32 v{{[0-9]+}}, [[SRC]], 0, [[WIDTH]]
 define amdgpu_kernel void @v_sbfe_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #1 {
+; SI-LABEL: v_sbfe_sub_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    v_bfe_i32 v2, v2, 0, v3
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_sbfe_sub_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_bfe_i32 v2, v3, 0, v4
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
   %in0.gep = getelementptr i32, i32 addrspace(1)* %in0, i32 %id.x
   %in1.gep = getelementptr i32, i32 addrspace(1)* %in1, i32 %id.x
@@ -93,17 +250,52 @@ define amdgpu_kernel void @v_sbfe_sub_i32(i32 addrspace(1)* %out, i32 addrspace(
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sbfe_sub_multi_use_shl_i32:
-; GCN: {{buffer|flat}}_load_dword [[SRC:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[WIDTH:v[0-9]+]]
-; GCN: v_sub_{{[iu]}}32_e32 [[SUB:v[0-9]+]], vcc, 32, [[WIDTH]]
-
-; GCN-NEXT: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], [[SUB]], [[SRC]]
-; GCN-NEXT: v_ashrrev_i32_e32 [[BFE:v[0-9]+]], [[SUB]], [[SHL]]
-
-; GCN: [[BFE]]
-; GCN: [[SHL]]
 define amdgpu_kernel void @v_sbfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #1 {
+; SI-LABEL: v_sbfe_sub_multi_use_shl_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT:    buffer_load_dword v2, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_sub_i32_e32 v3, vcc, 32, v3
+; SI-NEXT:    v_lshlrev_b32_e32 v2, v3, v2
+; SI-NEXT:    v_ashrrev_i32_e32 v3, v3, v2
+; SI-NEXT:    buffer_store_dword v3, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    buffer_store_dword v2, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_sbfe_sub_multi_use_shl_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_sub_u32_e32 v2, vcc, 32, v4
+; VI-NEXT:    v_lshlrev_b32_e32 v3, v2, v3
+; VI-NEXT:    v_ashrrev_i32_e32 v2, v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
   %in0.gep = getelementptr i32, i32 addrspace(1)* %in0, i32 %id.x
   %in1.gep = getelementptr i32, i32 addrspace(1)* %in1, i32 %id.x
@@ -118,12 +310,37 @@ define amdgpu_kernel void @v_sbfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out,
   ret void
 }
 
-; GCN-LABEL: {{^}}s_sbfe_sub_i32:
-; GCN: s_load_dwordx4 s[[[#LOAD:]]:[[END:[0-9]+]]], s[0:1], {{0x9|0x24}}
-; GCN: s_sub_i32 [[SUB:s[0-9]+]], 32, s[[#LOAD + 3]]
-; GCN: s_lshl_b32 [[TMP:s[0-9]+]], s[[#LOAD + 2]], [[SUB]]
-; GCN: s_ashr_i32 s{{[0-9]+}}, [[TMP]], [[SUB]]
 define amdgpu_kernel void @s_sbfe_sub_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 {
+; SI-LABEL: s_sbfe_sub_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_sub_i32 s3, 32, s3
+; SI-NEXT:    s_lshl_b32 s2, s2, s3
+; SI-NEXT:    s_ashr_i32 s2, s2, s3
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_sbfe_sub_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    s_sub_i32 s0, 32, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_lshl_b32 s1, s2, s0
+; VI-NEXT:    s_ashr_i32 s0, s1, s0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
   %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
   %sub = sub i32 32, %width
@@ -133,12 +350,44 @@ define amdgpu_kernel void @s_sbfe_sub_i32(i32 addrspace(1)* %out, i32 %src, i32
   ret void
 }
 
-; GCN-LABEL: {{^}}s_sbfe_sub_multi_use_shl_i32:
-; GCN: s_load_dwordx4 s[[[#LOAD:]]:[[END:[0-9]+]]], s[0:1], {{0x9|0x24}}
-; GCN: s_sub_i32 [[SUB:s[0-9]+]], 32, s[[#LOAD + 3]]
-; GCN: s_lshl_b32 [[SHL:s[0-9]+]], s[[#LOAD + 2]], [[SUB]]
-; GCN: s_ashr_i32 s{{[0-9]+}}, [[SHL]], [[SUB]]
 define amdgpu_kernel void @s_sbfe_sub_multi_use_shl_i32(i32 addrspace(1)* %out, i32 %src, i32 %width) #1 {
+; SI-LABEL: s_sbfe_sub_multi_use_shl_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s6, 0
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_sub_i32 s3, 32, s3
+; SI-NEXT:    s_lshl_b32 s2, s2, s3
+; SI-NEXT:    s_ashr_i32 s3, s2, s3
+; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: s_sbfe_sub_multi_use_shl_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    s_sub_i32 s0, 32, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_lshl_b32 s1, s2, s0
+; VI-NEXT:    s_ashr_i32 s0, s1, s0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    flat_store_dword v[0:1], v0
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
   %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id.x
   %sub = sub i32 32, %width
