[llvm] cd538a6 - [AMDGPU] Precommit fused-bitlogic.ll test. NFC.

Stanislav Mekhanoshin via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 15 13:56:32 PDT 2021


Author: Stanislav Mekhanoshin
Date: 2021-10-15T13:56:24-07:00
New Revision: cd538a6b146217e8f4d9ba1123bd3f62026bc28c

URL: https://github.com/llvm/llvm-project/commit/cd538a6b146217e8f4d9ba1123bd3f62026bc28c
DIFF: https://github.com/llvm/llvm-project/commit/cd538a6b146217e8f4d9ba1123bd3f62026bc28c.diff

LOG: [AMDGPU] Precommit fused-bitlogic.ll test. NFC.

Added: 
    llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll b/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll
new file mode 100644
index 000000000000..654ed0faa11d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll
@@ -0,0 +1,349 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN %s
+
+define amdgpu_kernel void @divergent_or3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_or3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 4, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_not_b32_e32 v0, v0
+; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %i2, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = or i32 %i5, %i4
+  %i8 = or i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %i2, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @divergent_or3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_or3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v6, 5, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-NEXT:    v_or_b32_e32 v0, v0, v4
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_not_b32_e32 v0, v0
+; GCN-NEXT:    v_not_b32_e32 v1, v1
+; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %i2, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = or i64 %i5, %i4
+  %i8 = or i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %i2, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @divergent_and3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_and3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 4, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v0, v1, v0
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_not_b32_e32 v0, v0
+; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %i2, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = and i32 %i5, %i4
+  %i8 = and i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %i2, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @divergent_and3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_and3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v6, 5, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_and_b32_e32 v1, v3, v1
+; GCN-NEXT:    v_and_b32_e32 v0, v2, v0
+; GCN-NEXT:    v_and_b32_e32 v0, v0, v4
+; GCN-NEXT:    v_and_b32_e32 v1, v1, v5
+; GCN-NEXT:    v_not_b32_e32 v0, v0
+; GCN-NEXT:    v_not_b32_e32 v1, v1
+; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %i2, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = and i64 %i5, %i4
+  %i8 = and i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %i2, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @divergent_xor3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_xor3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v3, 4, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx3 v[0:2], v3, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v0, v1, v0
+; GCN-NEXT:    v_xnor_b32_e32 v0, v0, v2
+; GCN-NEXT:    global_store_dword v3, v0, s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %i2, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = xor i32 %i5, %i4
+  %i8 = xor i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %i2, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @divergent_xor3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: divergent_xor3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT:    v_lshlrev_b32_e32 v6, 5, v0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    global_load_dwordx2 v[4:5], v6, s[0:1] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v6, s[0:1]
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_xor_b32_e32 v1, v3, v1
+; GCN-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GCN-NEXT:    v_xnor_b32_e32 v0, v0, v4
+; GCN-NEXT:    v_xnor_b32_e32 v1, v1, v5
+; GCN-NEXT:    global_store_dwordx2 v6, v[0:1], s[0:1]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %i1 = zext i32 %i to i64
+  %i2 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 %i1
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %i2, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = xor i64 %i5, %i4
+  %i8 = xor i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %i2, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @uniform_or3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_or3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_or_b32 s0, s1, s0
+; GCN-NEXT:    s_nor_b32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    global_store_dword v0, v1, s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = or i32 %i5, %i4
+  %i8 = or i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @uniform_or3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_or3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_nor_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %arg, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = or i64 %i5, %i4
+  %i8 = or i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @uniform_and3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_and3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s0, s1, s0
+; GCN-NEXT:    s_nand_b32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    global_store_dword v0, v1, s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = and i32 %i5, %i4
+  %i8 = and i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @uniform_and3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_and3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_nand_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %arg, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = and i64 %i5, %i4
+  %i8 = and i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+define amdgpu_kernel void @uniform_xor3_b32(<3 x i32> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_xor3_b32:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_xor_b32 s0, s1, s0
+; GCN-NEXT:    s_xnor_b32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    global_store_dword v0, v1, s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 16
+  %i4 = extractelement <3 x i32> %i3, i64 0
+  %i5 = extractelement <3 x i32> %i3, i64 1
+  %i6 = extractelement <3 x i32> %i3, i64 2
+  %i7 = xor i32 %i5, %i4
+  %i8 = xor i32 %i7, %i6
+  %i9 = xor i32 %i8, -1
+  %i10 = getelementptr inbounds <3 x i32>, <3 x i32> addrspace(1)* %arg, i64 0, i64 0
+  store i32 %i9, i32 addrspace(1)* %i10, align 16
+  ret void
+}
+
+define amdgpu_kernel void @uniform_xor3_b64(<3 x i64> addrspace(1)* %arg) {
+; GCN-LABEL: uniform_xor3_b64:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x10
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT:    s_xnor_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
+; GCN-NEXT:    s_endpgm
+bb:
+  %i3 = load <3 x i64>, <3 x i64> addrspace(1)* %arg, align 32
+  %i4 = extractelement <3 x i64> %i3, i64 0
+  %i5 = extractelement <3 x i64> %i3, i64 1
+  %i6 = extractelement <3 x i64> %i3, i64 2
+  %i7 = xor i64 %i5, %i4
+  %i8 = xor i64 %i7, %i6
+  %i9 = xor i64 %i8, -1
+  %i10 = getelementptr inbounds <3 x i64>, <3 x i64> addrspace(1)* %arg, i64 0, i64 0
+  store i64 %i9, i64 addrspace(1)* %i10, align 32
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
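
For reference, the GCN check lines above come from the autogeneration script
named in the test's NOTE header. A typical invocation looks like the
following (the build/bin path is an assumption; point --llc-binary at your
own llc build):

  llvm/utils/update_llc_test_checks.py \
      --llc-binary=build/bin/llc \
      llvm/test/CodeGen/AMDGPU/fused-bitlogic.ll

The script rewrites the ; GCN-NEXT: assertions in place from the current llc
output, which is why precommitting the test with baseline checks (NFC) lets a
follow-up codegen change show up as a pure diff of the check lines.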