[llvm] e0fd2d6 - AMDGPU: Add baseline tests for 16-bit bswap

Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Fri Feb 14 09:34:47 PST 2020


Author: Matt Arsenault
Date: 2020-02-14T09:34:13-08:00
New Revision: e0fd2d6d62a388ef193631afdb014dcb7214ca94

URL: https://github.com/llvm/llvm-project/commit/e0fd2d6d62a388ef193631afdb014dcb7214ca94
DIFF: https://github.com/llvm/llvm-project/commit/e0fd2d6d62a388ef193631afdb014dcb7214ca94.diff

LOG: AMDGPU: Add baseline tests for 16-bit bswap
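
These are pre-commit baseline checks: they capture the code currently
generated for 16-bit byte swaps so that a follow-up codegen change can show
its effect as a plain diff against known output. For reference,
llvm.bswap.i16 just exchanges the two bytes of its operand; a minimal C
equivalent of the tested semantics (an illustrative sketch, not part of the
commit) is:

  #include <stdint.h>

  /* Reference semantics of llvm.bswap.i16: swap the two bytes. */
  static uint16_t bswap16_ref(uint16_t x) {
    return (uint16_t)((x << 8) | (x >> 8));
  }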

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/bswap.ll

Removed: 
    

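For reference, assertion blocks in the style below (SI-LABEL/SI-NEXT and
VI-LABEL/VI-NEXT runs covering every emitted instruction) are normally
produced by the update script rather than written by hand; regenerating
them after a codegen change is a one-liner (assuming a built llc is on
PATH):

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AMDGPU/bswap.ll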

################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/bswap.ll b/llvm/test/CodeGen/AMDGPU/bswap.ll
index 473dc6050930..8fe7e62822c4 100644
--- a/llvm/test/CodeGen/AMDGPU/bswap.ll
+++ b/llvm/test/CodeGen/AMDGPU/bswap.ll
@@ -3,6 +3,9 @@
 ; RUN: llc < %s -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s -check-prefixes=FUNC,GCN,VI
 
 declare i16 @llvm.bswap.i16(i16) nounwind readnone
+declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>) nounwind readnone
+declare <3 x i16> @llvm.bswap.v3i16(<3 x i16>) nounwind readnone
+declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>) nounwind readnone
 declare i32 @llvm.bswap.i32(i32) nounwind readnone
 declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>) nounwind readnone
 declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>) nounwind readnone
@@ -10,6 +13,7 @@ declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>) nounwind readnone
 declare i64 @llvm.bswap.i64(i64) nounwind readnone
 declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>) nounwind readnone
 declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>) nounwind readnone
+declare i48 @llvm.bswap.i48(i48) #1
 
 define amdgpu_kernel void @test_bswap_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
 ; SI-LABEL: test_bswap_i32:
@@ -381,3 +385,212 @@ bb:
   %tmp3 = fpext half %tmp2 to float
   ret float %tmp3
 }
+
+define i16 @v_bswap_i16(i16 %src) {
+; SI-LABEL: v_bswap_i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_alignbit_b32 v1, v0, v0, 8
+; SI-NEXT:    v_alignbit_b32 v0, v0, v0, 24
+; SI-NEXT:    s_mov_b32 s4, 0xff00ff
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_bswap_i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s4, 0x10203
+; VI-NEXT:    v_perm_b32 v0, 0, v0, s4
+; VI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %bswap = call i16 @llvm.bswap.i16(i16 %src)
+  ret i16 %bswap
+}
+
+define i32 @v_bswap_i16_zext_to_i32(i16 %src) {
+; SI-LABEL: v_bswap_i16_zext_to_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_alignbit_b32 v1, v0, v0, 8
+; SI-NEXT:    v_alignbit_b32 v0, v0, v0, 24
+; SI-NEXT:    s_mov_b32 s4, 0xff00ff
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_bswap_i16_zext_to_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s4, 0x10203
+; VI-NEXT:    v_perm_b32 v0, 0, v0, s4
+; VI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %bswap = call i16 @llvm.bswap.i16(i16 %src)
+  %zext = zext i16 %bswap to i32
+  ret i32 %zext
+}
+
+define i32 @v_bswap_i16_sext_to_i32(i16 %src) {
+; SI-LABEL: v_bswap_i16_sext_to_i32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_alignbit_b32 v1, v0, v0, 8
+; SI-NEXT:    v_alignbit_b32 v0, v0, v0, 24
+; SI-NEXT:    s_mov_b32 s4, 0xff00ff
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v1
+; SI-NEXT:    v_ashrrev_i32_e32 v0, 16, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_bswap_i16_sext_to_i32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s4, 0x10203
+; VI-NEXT:    v_perm_b32 v0, 0, v0, s4
+; VI-NEXT:    v_ashrrev_i32_e32 v0, 16, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %bswap = call i16 @llvm.bswap.i16(i16 %src)
+  %zext = sext i16 %bswap to i32
+  ret i32 %zext
+}
+
+define <2 x i16> @v_bswap_v2i16(<2 x i16> %src) {
+; SI-LABEL: v_bswap_v2i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_alignbit_b32 v2, v1, v1, 8
+; SI-NEXT:    v_alignbit_b32 v1, v1, v1, 24
+; SI-NEXT:    s_mov_b32 s4, 0xff00ff
+; SI-NEXT:    v_alignbit_b32 v3, v0, v0, 8
+; SI-NEXT:    v_alignbit_b32 v0, v0, v0, 24
+; SI-NEXT:    v_bfi_b32 v1, s4, v1, v2
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v3
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_bswap_v2i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; VI-NEXT:    s_mov_b32 s4, 0x10203
+; VI-NEXT:    v_perm_b32 v1, 0, v1, s4
+; VI-NEXT:    v_perm_b32 v0, 0, v0, s4
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %bswap = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %src)
+  ret <2 x i16> %bswap
+}
+
+define <3 x i16> @v_bswap_v3i16(<3 x i16> %src) {
+; SI-LABEL: v_bswap_v3i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_alignbit_b32 v3, v1, v1, 8
+; SI-NEXT:    v_alignbit_b32 v1, v1, v1, 24
+; SI-NEXT:    s_mov_b32 s4, 0xff00ff
+; SI-NEXT:    v_alignbit_b32 v4, v0, v0, 8
+; SI-NEXT:    v_alignbit_b32 v0, v0, v0, 24
+; SI-NEXT:    v_alignbit_b32 v5, v2, v2, 8
+; SI-NEXT:    v_alignbit_b32 v2, v2, v2, 24
+; SI-NEXT:    v_bfi_b32 v1, s4, v1, v3
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v4
+; SI-NEXT:    v_bfi_b32 v2, s4, v2, v5
+; SI-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_alignbit_b32 v1, v2, v0, 16
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_bswap_v3i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s4, 0x10203
+; VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; VI-NEXT:    v_perm_b32 v1, 0, v1, s4
+; VI-NEXT:    v_perm_b32 v2, 0, v2, s4
+; VI-NEXT:    v_perm_b32 v0, 0, v0, s4
+; VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %bswap = call <3 x i16> @llvm.bswap.v3i16(<3 x i16> %src)
+  ret <3 x i16> %bswap
+}
+
+define <4 x i16> @v_bswap_v4i16(<4 x i16> %src) {
+; SI-LABEL: v_bswap_v4i16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_alignbit_b32 v4, v3, v3, 8
+; SI-NEXT:    v_alignbit_b32 v3, v3, v3, 24
+; SI-NEXT:    s_mov_b32 s4, 0xff00ff
+; SI-NEXT:    s_mov_b32 s5, 0xffff0000
+; SI-NEXT:    v_alignbit_b32 v5, v2, v2, 8
+; SI-NEXT:    v_alignbit_b32 v2, v2, v2, 24
+; SI-NEXT:    v_alignbit_b32 v6, v1, v1, 8
+; SI-NEXT:    v_alignbit_b32 v1, v1, v1, 24
+; SI-NEXT:    v_alignbit_b32 v7, v0, v0, 8
+; SI-NEXT:    v_alignbit_b32 v0, v0, v0, 24
+; SI-NEXT:    v_bfi_b32 v3, s4, v3, v4
+; SI-NEXT:    v_bfi_b32 v2, s4, v2, v5
+; SI-NEXT:    v_bfi_b32 v1, s4, v1, v6
+; SI-NEXT:    v_bfi_b32 v0, s4, v0, v7
+; SI-NEXT:    v_and_b32_e32 v3, s5, v3
+; SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_and_b32_e32 v1, s5, v1
+; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT:    v_or_b32_e32 v2, v2, v3
+; SI-NEXT:    v_or_b32_e32 v0, v0, v1
+; SI-NEXT:    v_alignbit_b32 v1, v2, v0, 16
+; SI-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_bswap_v4i16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v1
+; VI-NEXT:    s_mov_b32 s4, 0x10203
+; VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
+; VI-NEXT:    v_perm_b32 v2, 0, v2, s4
+; VI-NEXT:    v_perm_b32 v1, 0, v1, s4
+; VI-NEXT:    v_perm_b32 v3, 0, v3, s4
+; VI-NEXT:    v_perm_b32 v0, 0, v0, s4
+; VI-NEXT:    v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %bswap = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %src)
+  ret <4 x i16> %bswap
+}
+
+define i64 @v_bswap_i48(i64 %src) {
+; SI-LABEL: v_bswap_i48:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_alignbit_b32 v2, v0, v0, 8
+; SI-NEXT:    v_alignbit_b32 v0, v0, v0, 24
+; SI-NEXT:    s_mov_b32 s4, 0xff00ff
+; SI-NEXT:    v_alignbit_b32 v3, v1, v1, 8
+; SI-NEXT:    v_alignbit_b32 v1, v1, v1, 24
+; SI-NEXT:    v_bfi_b32 v2, s4, v0, v2
+; SI-NEXT:    v_bfi_b32 v0, s4, v1, v3
+; SI-NEXT:    v_alignbit_b32 v0, v2, v0, 16
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_bswap_i48:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    s_mov_b32 s4, 0x10203
+; VI-NEXT:    v_perm_b32 v2, 0, v0, s4
+; VI-NEXT:    v_perm_b32 v0, 0, v1, s4
+; VI-NEXT:    v_alignbit_b32 v0, v2, v0, 16
+; VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %trunc = trunc i64 %src to i48
+  %bswap = call i48 @llvm.bswap.i48(i48 %trunc)
+  %zext = zext i48 %bswap to i64
+  ret i64 %zext
+}
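
A note on the two expansions pinned down above, since the generated
sequences are terse. On SI, bswap is built from two rotates
(v_alignbit_b32 with both data sources equal acts as a rotate-right)
merged by a bitfield insert (v_bfi_b32, a bitwise select) under the byte
mask 0xff00ff; for i16, the swapped halfword is then shifted into the low
16 bits. A C sketch of the equivalent computation, assuming the standard
definitions of these instructions:

  #include <stdint.h>

  /* v_alignbit_b32 dst, x, x, n acts as rotate-right by n (n in 1..31). */
  static uint32_t rotr32(uint32_t x, unsigned n) {
    return (x >> n) | (x << (32 - n));
  }

  /* v_bfi_b32 dst, mask, a, b computes (a & mask) | (b & ~mask). */
  static uint32_t bfi(uint32_t mask, uint32_t a, uint32_t b) {
    return (a & mask) | (b & ~mask);
  }

  /* The SI 32-bit bswap idiom seen in the checks above. */
  static uint32_t bswap32_si(uint32_t x) {
    return bfi(0x00ff00ffu, rotr32(x, 24), rotr32(x, 8));
  }

  /* i16 reuses the 32-bit idiom, then shifts the swapped halfword down,
     matching the trailing v_lshrrev_b32 ..., 16. */
  static uint16_t bswap16_si(uint16_t x) {
    return (uint16_t)(bswap32_si(x) >> 16);
  }

On VI, a single v_perm_b32 does the whole byte shuffle: each byte of the
selector operand picks one byte out of the concatenation of the two source
registers. Continuing the sketch above, here is the subset of v_perm_b32
used by these tests (selector byte values >= 8, which produce constants,
are left out):

  /* v_perm_b32 dst, s0, s1, sel: selector byte k picks byte k of the
     64-bit value {s0:s1}, so 0-3 address s1 and 4-7 address s0.
     All selector bytes must be < 8 in this sketch. */
  static uint32_t perm_b32(uint32_t s0, uint32_t s1, uint32_t sel) {
    uint64_t pool = ((uint64_t)s0 << 32) | s1;
    uint32_t d = 0;
    for (int i = 0; i < 4; ++i) {
      unsigned k = (sel >> (8 * i)) & 0xff; /* selector for result byte i */
      d |= (uint32_t)((pool >> (8 * k)) & 0xff) << (8 * i);
    }
    return d;
  }

  /* VI expansion: selector 0x10203 reverses the four bytes of v0, and the
     following 16-bit shift extracts the swapped low halfword. */
  static uint16_t bswap16_vi(uint16_t x) {
    return (uint16_t)(perm_b32(0, x, 0x00010203u) >> 16);
  }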