[llvm-bugs] [Bug 28160] New: [x86, SSE] hoist vector cast ops to avoid unnecessary work?
via llvm-bugs
llvm-bugs at lists.llvm.org
Thu Jun 16 13:04:06 PDT 2016
https://llvm.org/bugs/show_bug.cgi?id=28160
Bug ID: 28160
Summary: [x86, SSE] hoist vector cast ops to avoid unnecessary
work?
Product: libraries
Version: trunk
Hardware: PC
OS: All
Status: NEW
Severity: normal
Priority: P
Component: Backend: X86
Assignee: unassignedbugs at nondot.org
Reporter: spatel+llvm at rotateright.com
CC: llvm-bugs at lists.llvm.org
Classification: Unclassified
Filing based on the example in http://reviews.llvm.org/D21190 (this problem
exists regardless of that patch):
define <4 x i32> @f(<4 x float> %a, <4 x float> %b, <4 x i64> %c, <4 x i64> %d) {
  %cmp = fcmp olt <4 x float> %a, %b
  %tr1 = trunc <4 x i64> %c to <4 x i32>
  %tr2 = trunc <4 x i64> %d to <4 x i32>
  %sel = select <4 x i1> %cmp, <4 x i32> %tr1, <4 x i32> %tr2
  ret <4 x i32> %sel
}
InstCombine (correctly, I think) hoists the select above the truncs to reduce
the number of IR instructions:
$ ./opt -instcombine trunc.ll -S
define <4 x i32> @f(<4 x float> %a, <4 x float> %b, <4 x i64> %c, <4 x i64> %d) {
  %cmp = fcmp olt <4 x float> %a, %b
  %sel.v = select <4 x i1> %cmp, <4 x i64> %c, <4 x i64> %d
  %sel = trunc <4 x i64> %sel.v to <4 x i32>
  ret <4 x i32> %sel
}
On an SSE2 machine, this leads to much worse codegen: <4 x i64> is not a legal
type, so the compare ends up being done once per 128-bit half, its mask has to
be widened to 64-bit lanes, and the blended result still has to be truncated
back down, more than doubling the instruction count:
$ ./opt trunc.ll -S |./llc -o -
cmpltps %xmm1, %xmm0
pshufd $232, %xmm3, %xmm1 ## xmm1 = xmm3[0,2,2,3]
pshufd $232, %xmm2, %xmm2 ## xmm2 = xmm2[0,2,2,3]
punpcklqdq %xmm1, %xmm2 ## xmm2 = xmm2[0],xmm1[0]
pshufd $232, %xmm5, %xmm1 ## xmm1 = xmm5[0,2,2,3]
pshufd $232, %xmm4, %xmm3 ## xmm3 = xmm4[0,2,2,3]
punpcklqdq %xmm1, %xmm3 ## xmm3 = xmm3[0],xmm1[0]
pand %xmm0, %xmm2
pandn %xmm3, %xmm0
por %xmm2, %xmm0
retq
.cfi_endproc
$ ./opt -instcombine trunc.ll -S |./llc -o -
movaps %xmm0, %xmm6
cmpltps %xmm1, %xmm6
pshufd $212, %xmm6, %xmm6 ## xmm6 = xmm6[0,1,1,3]
psllq $63, %xmm6
psrad $31, %xmm6
pshufd $245, %xmm6, %xmm6 ## xmm6 = xmm6[1,1,3,3]
shufpd $1, %xmm1, %xmm1 ## xmm1 = xmm1[1,0]
shufpd $1, %xmm0, %xmm0 ## xmm0 = xmm0[1,0]
cmpltps %xmm1, %xmm0
pshufd $212, %xmm0, %xmm0 ## xmm0 = xmm0[0,1,1,3]
psllq $63, %xmm0
psrad $31, %xmm0
pshufd $245, %xmm0, %xmm1 ## xmm1 = xmm0[1,1,3,3]
pand %xmm1, %xmm3
pandn %xmm5, %xmm1
por %xmm3, %xmm1
pand %xmm6, %xmm2
pandn %xmm4, %xmm6
por %xmm2, %xmm6
pshufd $232, %xmm6, %xmm0 ## xmm0 = xmm6[0,2,2,3]
pshufd $232, %xmm1, %xmm1 ## xmm1 = xmm1[0,2,2,3]
punpcklqdq %xmm1, %xmm0 ## xmm0 = xmm0[0],xmm1[0]
retq
Even with AVX2, where everything is legal, there's a codegen difference,
suggesting that there's still an opportunity to earn a graduate degree in x86
shuffle lowering:
$ ./opt trunc.ll -S |./llc -o - -mattr=avx2
vcmpltps %xmm1, %xmm0, %xmm0
vpshufd $136, %ymm2, %ymm1 ## ymm1 = ymm2[0,2,0,2,4,6,4,6]
vpermq $236, %ymm1, %ymm1 ## ymm1 = ymm1[0,3,2,3]
vpshufd $136, %ymm3, %ymm2 ## ymm2 = ymm3[0,2,0,2,4,6,4,6]
vpermq $236, %ymm2, %ymm2 ## ymm2 = ymm2[0,3,2,3]
vblendvps %xmm0, %xmm1, %xmm2, %xmm0
vzeroupper
retq
.cfi_endproc
.subsections_via_symbols
$ ./opt -instcombine trunc.ll -S |./llc -o - -mattr=avx2
vcmpltps %xmm1, %xmm0, %xmm0
vpmovsxdq %xmm0, %ymm0
vblendvpd %ymm0, %ymm2, %ymm3, %ymm0
vpshufd $136, %ymm0, %ymm0 ## ymm0 = ymm0[0,2,0,2,4,6,4,6]
vpermq $236, %ymm0, %ymm0 ## ymm0 = ymm0[0,3,2,3]
vzeroupper
retq
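
One possible way to attack this in the backend, independent of what InstCombine
prefers, would be a DAGCombiner-style re-narrowing that rewrites
trunc (vselect C, X, Y) into vselect C, (trunc X), (trunc Y) when the narrow
vector type is legal for the target but the wide one is not. A minimal sketch
follows; it is only an illustration, not a patch: the function name is made up,
it assumes it would be called from the truncate combine in DAGCombiner.cpp, and
a real version would also have to make sure the select mask type is still valid
for the narrow result.

SDValue narrowTruncOfVSelect(SDNode *N, SelectionDAG &DAG,
                             const TargetLowering &TLI) {
  // N is expected to be (truncate (vselect Cond, X, Y)).
  if (N->getOpcode() != ISD::TRUNCATE)
    return SDValue();
  SDValue Sel = N->getOperand(0);
  if (Sel.getOpcode() != ISD::VSELECT || !Sel.hasOneUse())
    return SDValue();

  EVT NarrowVT = N->getValueType(0);
  EVT WideVT = Sel.getValueType();

  // Only re-narrow when it is clearly profitable: the wide select type is
  // not legal (e.g. <4 x i64> with SSE2) but the narrow one is (<4 x i32>).
  if (!NarrowVT.isVector() || TLI.isTypeLegal(WideVT) ||
      !TLI.isTypeLegal(NarrowVT))
    return SDValue();

  // trunc(select(c, x, y)) == select(c, trunc(x), trunc(y)), so the rewrite
  // itself is always valid; the legality check above is the cost model.
  SDLoc DL(N);
  SDValue TX = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, Sel.getOperand(1));
  SDValue TY = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, Sel.getOperand(2));
  return DAG.getNode(ISD::VSELECT, DL, NarrowVT, Sel.getOperand(0), TX, TY);
}

For the SSE2 example above, that would effectively hand the rest of the backend
the pre-InstCombine form, which already produces the shorter pand/pandn/por
sequence.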