[llvm] 2d20fb0 - [X86] Add test coverage for sext/zext/bswap/bitreverse with freeze
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 8 07:44:12 PDT 2022
Author: Simon Pilgrim
Date: 2022-08-08T15:44:00+01:00
New Revision: 2d20fb00b3c7e455c7ba668ff55efc6515bce383
URL: https://github.com/llvm/llvm-project/commit/2d20fb00b3c7e455c7ba668ff55efc6515bce383
DIFF: https://github.com/llvm/llvm-project/commit/2d20fb00b3c7e455c7ba668ff55efc6515bce383.diff
LOG: [X86] Add test coverage for sext/zext/bswap/bitreverse with freeze
All of these are safe to fold: freeze(unaryop(x)) -> unaryop(freeze(x))
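
As an illustrative sketch (the function name here is hypothetical; it mirrors the scalar sext test in the new file), the fold would rewrite IR like:

    define i32 @sketch(i8 %a0) {
      %x = sext i8 %a0 to i16
      %y = freeze i16 %x
      %z = sext i16 %y to i32
      ret i32 %z
    }

into:

    define i32 @sketch(i8 %a0) {
      %f = freeze i8 %a0
      %x = sext i8 %f to i16
      %z = sext i16 %x to i32
      ret i32 %z
    }

at which point the two sign extensions can be combined into a single sext from i8 to i32.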
Added:
llvm/test/CodeGen/X86/freeze-unary.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/freeze-unary.ll b/llvm/test/CodeGen/X86/freeze-unary.ll
new file mode 100644
index 000000000000..f78fa140db2a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/freeze-unary.ll
@@ -0,0 +1,313 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.2 | FileCheck %s --check-prefixes=X64
+
+define i32 @freeze_sext(i8 %a0) nounwind {
+; X86-LABEL: freeze_sext:
+; X86: # %bb.0:
+; X86-NEXT: movsbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cwtl
+; X86-NEXT: retl
+;
+; X64-LABEL: freeze_sext:
+; X64: # %bb.0:
+; X64-NEXT: movsbl %dil, %eax
+; X64-NEXT: cwtl
+; X64-NEXT: retq
+ %x = sext i8 %a0 to i16
+ %y = freeze i16 %x
+ %z = sext i16 %y to i32
+ ret i32 %z
+}
+
+define <4 x i32> @freeze_sext_vec(<4 x i8> %a0) nounwind {
+; X86-LABEL: freeze_sext_vec:
+; X86: # %bb.0:
+; X86-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X86-NEXT: psraw $8, %xmm0
+; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; X86-NEXT: psrad $16, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: freeze_sext_vec:
+; X64: # %bb.0:
+; X64-NEXT: pmovsxbw %xmm0, %xmm0
+; X64-NEXT: pmovsxwd %xmm0, %xmm0
+; X64-NEXT: retq
+ %x = sext <4 x i8> %a0 to <4 x i16>
+ %y = freeze <4 x i16> %x
+ %z = sext <4 x i16> %y to <4 x i32>
+ ret <4 x i32> %z
+}
+
+define i32 @freeze_zext(i8 %a0) nounwind {
+; X86-LABEL: freeze_zext:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: freeze_zext:
+; X64: # %bb.0:
+; X64-NEXT: movzbl %dil, %eax
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: retq
+ %x = zext i8 %a0 to i16
+ %y = freeze i16 %x
+ %z = zext i16 %y to i32
+ ret i32 %z
+}
+
+define <2 x i64> @freeze_zext_vec(<2 x i16> %a0) nounwind {
+; X86-LABEL: freeze_zext_vec:
+; X86: # %bb.0:
+; X86-NEXT: pxor %xmm1, %xmm1
+; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-NEXT: retl
+;
+; X64-LABEL: freeze_zext_vec:
+; X64: # %bb.0:
+; X64-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X64-NEXT: retq
+ %x = zext <2 x i16> %a0 to <2 x i32>
+ %y = freeze <2 x i32> %x
+ %z = zext <2 x i32> %y to <2 x i64>
+ ret <2 x i64> %z
+}
+
+define i32 @freeze_bswap(i32 %a0) nounwind {
+; X86-LABEL: freeze_bswap:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bswapl %eax
+; X86-NEXT: bswapl %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: freeze_bswap:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %eax
+; X64-NEXT: retq
+ %x = call i32 @llvm.bswap.i32(i32 %a0)
+ %y = freeze i32 %x
+ %z = call i32 @llvm.bswap.i32(i32 %y)
+ ret i32 %z
+}
+declare i32 @llvm.bswap.i32(i32)
+
+define <4 x i32> @freeze_bswap_vec(<4 x i32> %a0) nounwind {
+; X86-LABEL: freeze_bswap_vec:
+; X86: # %bb.0:
+; X86-NEXT: pxor %xmm1, %xmm1
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; X86-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; X86-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; X86-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; X86-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; X86-NEXT: packuswb %xmm2, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; X86-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; X86-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; X86-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; X86-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; X86-NEXT: packuswb %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: freeze_bswap_vec:
+; X64: # %bb.0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X64-NEXT: pshufb %xmm1, %xmm0
+; X64-NEXT: pshufb %xmm1, %xmm0
+; X64-NEXT: retq
+ %x = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %a0)
+ %y = freeze <4 x i32> %x
+ %z = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %y)
+ ret <4 x i32> %z
+}
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
+
+define i32 @freeze_bitreverse(i32 %a0) nounwind {
+; X86-LABEL: freeze_bitreverse:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bswapl %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT: shll $4, %ecx
+; X86-NEXT: shrl $4, %eax
+; X86-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT: shrl $2, %eax
+; X86-NEXT: andl $858993459, %eax # imm = 0x33333333
+; X86-NEXT: leal (%eax,%ecx,4), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT: shrl %eax
+; X86-NEXT: andl $1431655765, %eax # imm = 0x55555555
+; X86-NEXT: leal (%eax,%ecx,2), %eax
+; X86-NEXT: bswapl %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; X86-NEXT: shll $4, %ecx
+; X86-NEXT: shrl $4, %eax
+; X86-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT: shrl $2, %eax
+; X86-NEXT: andl $858993459, %eax # imm = 0x33333333
+; X86-NEXT: leal (%eax,%ecx,4), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; X86-NEXT: shrl %eax
+; X86-NEXT: andl $1431655765, %eax # imm = 0x55555555
+; X86-NEXT: leal (%eax,%ecx,2), %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: freeze_bitreverse:
+; X64: # %bb.0:
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: bswapl %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
+; X64-NEXT: shll $4, %eax
+; X64-NEXT: shrl $4, %edi
+; X64-NEXT: andl $252645135, %edi # imm = 0xF0F0F0F
+; X64-NEXT: orl %eax, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $858993459, %eax # imm = 0x33333333
+; X64-NEXT: shrl $2, %edi
+; X64-NEXT: andl $858993459, %edi # imm = 0x33333333
+; X64-NEXT: leal (%rdi,%rax,4), %eax
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; X64-NEXT: shrl %eax
+; X64-NEXT: andl $1431655765, %eax # imm = 0x55555555
+; X64-NEXT: leal (%rax,%rcx,2), %eax
+; X64-NEXT: bswapl %eax
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: andl $252645135, %ecx # imm = 0xF0F0F0F
+; X64-NEXT: shll $4, %ecx
+; X64-NEXT: shrl $4, %eax
+; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
+; X64-NEXT: orl %ecx, %eax
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: andl $858993459, %ecx # imm = 0x33333333
+; X64-NEXT: shrl $2, %eax
+; X64-NEXT: andl $858993459, %eax # imm = 0x33333333
+; X64-NEXT: leal (%rax,%rcx,4), %eax
+; X64-NEXT: movl %eax, %ecx
+; X64-NEXT: andl $1431655765, %ecx # imm = 0x55555555
+; X64-NEXT: shrl %eax
+; X64-NEXT: andl $1431655765, %eax # imm = 0x55555555
+; X64-NEXT: leal (%rax,%rcx,2), %eax
+; X64-NEXT: retq
+ %x = call i32 @llvm.bitreverse.i32(i32 %a0)
+ %y = freeze i32 %x
+ %z = call i32 @llvm.bitreverse.i32(i32 %y)
+ ret i32 %z
+}
+declare i32 @llvm.bitreverse.i32(i32)
+
+define <4 x i32> @freeze_bitreverse_vec(<4 x i32> %a0) nounwind {
+; X86-LABEL: freeze_bitreverse_vec:
+; X86: # %bb.0:
+; X86-NEXT: pxor %xmm1, %xmm1
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
+; X86-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
+; X86-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
+; X86-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; X86-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; X86-NEXT: packuswb %xmm2, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm3
+; X86-NEXT: psrlw $4, %xmm3
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-NEXT: pand %xmm2, %xmm3
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: psllw $4, %xmm0
+; X86-NEXT: por %xmm3, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm4
+; X86-NEXT: psrlw $2, %xmm4
+; X86-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
+; X86-NEXT: pand %xmm3, %xmm4
+; X86-NEXT: pand %xmm3, %xmm0
+; X86-NEXT: psllw $2, %xmm0
+; X86-NEXT: por %xmm4, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm5
+; X86-NEXT: psrlw $1, %xmm5
+; X86-NEXT: movdqa {{.*#+}} xmm4 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
+; X86-NEXT: pand %xmm4, %xmm5
+; X86-NEXT: pand %xmm4, %xmm0
+; X86-NEXT: paddb %xmm0, %xmm0
+; X86-NEXT: por %xmm5, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm5
+; X86-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
+; X86-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,2,1,0,4,5,6,7]
+; X86-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,5,4]
+; X86-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
+; X86-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
+; X86-NEXT: packuswb %xmm5, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm1
+; X86-NEXT: psrlw $4, %xmm1
+; X86-NEXT: pand %xmm2, %xmm1
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: psllw $4, %xmm0
+; X86-NEXT: por %xmm1, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm1
+; X86-NEXT: psrlw $2, %xmm1
+; X86-NEXT: pand %xmm3, %xmm1
+; X86-NEXT: pand %xmm3, %xmm0
+; X86-NEXT: psllw $2, %xmm0
+; X86-NEXT: por %xmm1, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm1
+; X86-NEXT: psrlw $1, %xmm1
+; X86-NEXT: pand %xmm4, %xmm1
+; X86-NEXT: pand %xmm4, %xmm0
+; X86-NEXT: paddb %xmm0, %xmm0
+; X86-NEXT: por %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: freeze_bitreverse_vec:
+; X64: # %bb.0:
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; X64-NEXT: pshufb %xmm2, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X64-NEXT: movdqa %xmm0, %xmm1
+; X64-NEXT: pand %xmm3, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; X64-NEXT: movdqa %xmm4, %xmm5
+; X64-NEXT: pshufb %xmm1, %xmm5
+; X64-NEXT: psrlw $4, %xmm0
+; X64-NEXT: pand %xmm3, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; X64-NEXT: movdqa %xmm1, %xmm6
+; X64-NEXT: pshufb %xmm0, %xmm6
+; X64-NEXT: por %xmm5, %xmm6
+; X64-NEXT: pshufb %xmm2, %xmm6
+; X64-NEXT: movdqa %xmm6, %xmm0
+; X64-NEXT: pand %xmm3, %xmm0
+; X64-NEXT: pshufb %xmm0, %xmm4
+; X64-NEXT: psrlw $4, %xmm6
+; X64-NEXT: pand %xmm3, %xmm6
+; X64-NEXT: pshufb %xmm6, %xmm1
+; X64-NEXT: por %xmm4, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+ %x = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a0)
+ %y = freeze <4 x i32> %x
+ %z = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %y)
+ ret <4 x i32> %z
+}
+declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>)
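
Note how, in the checks above, the freeze currently blocks further combining, so each operation is emitted twice (e.g. the back-to-back bswapl instructions in freeze_bswap). Once freeze(unaryop(x)) -> unaryop(freeze(x)) is handled, the frozen value feeds the second operation directly and existing combines such as bswap(bswap(x)) -> x should apply. A hedged sketch of the x86_64 output one might then expect for freeze_bswap (an expectation, not autogenerated output):

    freeze_bswap:
      movl %edi, %eax        # the two bswapl instructions cancel out
      retq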