<html>
<head>
<base href="https://llvm.org/bugs/" />
</head>
<body><table border="1" cellspacing="0" cellpadding="8">
<tr>
<th>Bug ID</th>
<td><a class="bz_bug_link
bz_status_NEW "
title="NEW --- - [x86, SSE] hoist vector cast ops to avoid unnecessary work?"
href="https://llvm.org/bugs/show_bug.cgi?id=28160">28160</a>
</td>
</tr>
<tr>
<th>Summary</th>
<td>[x86, SSE] hoist vector cast ops to avoid unnecessary work?
</td>
</tr>
<tr>
<th>Product</th>
<td>libraries
</td>
</tr>
<tr>
<th>Version</th>
<td>trunk
</td>
</tr>
<tr>
<th>Hardware</th>
<td>PC
</td>
</tr>
<tr>
<th>OS</th>
<td>All
</td>
</tr>
<tr>
<th>Status</th>
<td>NEW
</td>
</tr>
<tr>
<th>Severity</th>
<td>normal
</td>
</tr>
<tr>
<th>Priority</th>
<td>P
</td>
</tr>
<tr>
<th>Component</th>
<td>Backend: X86
</td>
</tr>
<tr>
<th>Assignee</th>
<td>unassignedbugs@nondot.org
</td>
</tr>
<tr>
<th>Reporter</th>
<td>spatel+llvm@rotateright.com
</td>
</tr>
<tr>
<th>CC</th>
<td>llvm-bugs@lists.llvm.org
</td>
</tr>
<tr>
<th>Classification</th>
<td>Unclassified
</td>
</tr></table>
<p>
<div>
<pre>Filing based on the example in <a href="http://reviews.llvm.org/D21190">http://reviews.llvm.org/D21190</a> (this problem
exists regardless of that patch):

define <4 x i32> @f(<4 x float> %a, <4 x float> %b, <4 x i64> %c, <4 x i64> %d) {
  %cmp = fcmp olt <4 x float> %a, %b
  %tr1 = trunc <4 x i64> %c to <4 x i32>
  %tr2 = trunc <4 x i64> %d to <4 x i32>
  %sel = select <4 x i1> %cmp, <4 x i32> %tr1, <4 x i32> %tr2
  ret <4 x i32> %sel
}
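
For reference, a rough source-level equivalent of that IR (my own
reconstruction, not taken from D21190; assumes Clang's GCC-style vector
extensions, i.e. __builtin_convertvector and the element-wise ?: operator
on vector types):

// Hypothetical reconstruction only -- any source producing this IR works.
typedef float     v4f32 __attribute__((vector_size(16)));
typedef int       v4i32 __attribute__((vector_size(16)));
typedef long long v4i64 __attribute__((vector_size(32)));

v4i32 f(v4f32 a, v4f32 b, v4i64 c, v4i64 d) {
  v4i32 tr1 = __builtin_convertvector(c, v4i32); // trunc <4 x i64> to <4 x i32>
  v4i32 tr2 = __builtin_convertvector(d, v4i32); // trunc <4 x i64> to <4 x i32>
  return a < b ? tr1 : tr2;                      // fcmp olt + select
}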

Instcombine (correctly, I think) hoists the select ahead of the truncs to
reduce the number of IR instructions:

$ ./opt -instcombine trunc.ll -S

define <4 x i32> @f(<4 x float> %a, <4 x float> %b, <4 x i64> %c, <4 x i64> %d) {
  %cmp = fcmp olt <4 x float> %a, %b
  %sel.v = select <4 x i1> %cmp, <4 x i64> %c, <4 x i64> %d
  %sel = trunc <4 x i64> %sel.v to <4 x i32>
  ret <4 x i32> %sel
}

On an SSE2 machine, where <4 x i64> is not a legal type, this leads to much
worse codegen:

$ ./opt trunc.ll -S | ./llc -o -

  cmpltps     %xmm1, %xmm0
  pshufd      $232, %xmm3, %xmm1  ## xmm1 = xmm3[0,2,2,3]
  pshufd      $232, %xmm2, %xmm2  ## xmm2 = xmm2[0,2,2,3]
  punpcklqdq  %xmm1, %xmm2        ## xmm2 = xmm2[0],xmm1[0]
  pshufd      $232, %xmm5, %xmm1  ## xmm1 = xmm5[0,2,2,3]
  pshufd      $232, %xmm4, %xmm3  ## xmm3 = xmm4[0,2,2,3]
  punpcklqdq  %xmm1, %xmm3        ## xmm3 = xmm3[0],xmm1[0]
  pand        %xmm0, %xmm2
  pandn       %xmm3, %xmm0
  por         %xmm2, %xmm0
  retq
  .cfi_endproc

$ ./opt -instcombine trunc.ll -S | ./llc -o -

  movaps      %xmm0, %xmm6
  cmpltps     %xmm1, %xmm6
  pshufd      $212, %xmm6, %xmm6  ## xmm6 = xmm6[0,1,1,3]
  psllq       $63, %xmm6
  psrad       $31, %xmm6
  pshufd      $245, %xmm6, %xmm6  ## xmm6 = xmm6[1,1,3,3]
  shufpd      $1, %xmm1, %xmm1    ## xmm1 = xmm1[1,0]
  shufpd      $1, %xmm0, %xmm0    ## xmm0 = xmm0[1,0]
  cmpltps     %xmm1, %xmm0
  pshufd      $212, %xmm0, %xmm0  ## xmm0 = xmm0[0,1,1,3]
  psllq       $63, %xmm0
  psrad       $31, %xmm0
  pshufd      $245, %xmm0, %xmm1  ## xmm1 = xmm0[1,1,3,3]
  pand        %xmm1, %xmm3
  pandn       %xmm5, %xmm1
  por         %xmm3, %xmm1
  pand        %xmm6, %xmm2
  pandn       %xmm4, %xmm6
  por         %xmm2, %xmm6
  pshufd      $232, %xmm6, %xmm0  ## xmm0 = xmm6[0,2,2,3]
  pshufd      $232, %xmm1, %xmm1  ## xmm1 = xmm1[0,2,2,3]
  punpcklqdq  %xmm1, %xmm0        ## xmm0 = xmm0[0],xmm1[0]
  retq

Even with AVX2, where everything is legal, there's a codegen difference,
suggesting that there's still opportunity to earn a graduate degree in x86
shuffle lowering:

$ ./opt trunc.ll -S | ./llc -o - -mattr=avx2

  vcmpltps    %xmm1, %xmm0, %xmm0
  vpshufd     $136, %ymm2, %ymm1  ## ymm1 = ymm2[0,2,0,2,4,6,4,6]
  vpermq      $236, %ymm1, %ymm1  ## ymm1 = ymm1[0,3,2,3]
  vpshufd     $136, %ymm3, %ymm2  ## ymm2 = ymm3[0,2,0,2,4,6,4,6]
  vpermq      $236, %ymm2, %ymm2  ## ymm2 = ymm2[0,3,2,3]
  vblendvps   %xmm0, %xmm1, %xmm2, %xmm0
  vzeroupper
  retq
  .cfi_endproc
  .subsections_via_symbols

$ ./opt -instcombine trunc.ll -S | ./llc -o - -mattr=avx2

  vcmpltps    %xmm1, %xmm0, %xmm0
  vpmovsxdq   %xmm0, %ymm0
  vblendvpd   %ymm0, %ymm2, %ymm3, %ymm0
  vpshufd     $136, %ymm0, %ymm0  ## ymm0 = ymm0[0,2,0,2,4,6,4,6]
  vpermq      $236, %ymm0, %ymm0  ## ymm0 = ymm0[0,3,2,3]
  vzeroupper
  retq</pre>
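<pre>
One possible direction (a sketch under assumptions, not in-tree code): a
DAG combine that re-hoists the truncs above the select when the wide
vselect would be expensive to legalize, i.e. rewrite
trunc (vselect Cond, X, Y) --> vselect Cond, (trunc X), (trunc Y).
The function name and profitability check below are hypothetical:

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue combineTruncOfVSelect(SDNode *N, SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  SDValue Src = N->getOperand(0);
  EVT NarrowVT = N->getValueType(0);
  if (Src.getOpcode() != ISD::VSELECT || !Src.hasOneUse())
    return SDValue();
  // Only bother when the wide select type is illegal but the narrow one
  // is legal (e.g. <4 x i64> vs. <4 x i32> on SSE2), so this must run
  // before type legalization.
  if (TLI.isTypeLegal(Src.getValueType()) || !TLI.isTypeLegal(NarrowVT))
    return SDValue();
  SDLoc DL(N);
  // Truncate each arm first; the condition vector has the same element
  // count, so it can be reused by the narrow vselect.
  SDValue TLHS = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, Src.getOperand(1));
  SDValue TRHS = DAG.getNode(ISD::TRUNCATE, DL, NarrowVT, Src.getOperand(2));
  return DAG.getNode(ISD::VSELECT, DL, NarrowVT, Src.getOperand(0),
                     TLHS, TRHS);
}
</pre>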
</div>
</p>
<hr>
<span>You are receiving this mail because:</span>
<ul>
<li>You are on the CC list for the bug.</li>
</ul>
</body>
</html>