[llvm-bugs] [Bug 34704] New: BZHI peephole doesn't find all the use-cases, and doesn't optimize away masking / range-check
via llvm-bugs
llvm-bugs at lists.llvm.org
Fri Sep 22 10:58:39 PDT 2017
https://bugs.llvm.org/show_bug.cgi?id=34704
Bug ID: 34704
Summary: BZHI peephole doesn't find all the use-cases, and
doesn't optimize away masking / range-check
Product: libraries
Version: trunk
Hardware: PC
OS: Linux
Status: NEW
Severity: enhancement
Priority: P
Component: Backend: X86
Assignee: unassignedbugs at nondot.org
Reporter: peter at cordes.ca
CC: llvm-bugs at lists.llvm.org
unsigned bzhi_exact(unsigned x, unsigned c) {
    c &= 0xff;
    if (c <= 31) {   // model actual bzhi behaviour
        x &= ((1U << c) - 1);
        // 1UL defeats clang's peephole
    }
    return x;
}
// https://godbolt.org/g/jSQA44
// clang 6.0.0 (trunk 313965)
        movzbl  %sil, %eax
        cmpl    $31, %eax
        ja      .LBB0_2
        bzhil   %esi, %edi, %edi
.LBB0_2:
        movl    %edi, %eax      # doesn't need to be on the critical path for the not-taken path
        retq
But this can be compiled to just:

        bzhil   %esi, %edi, %eax
        retq
because BZHI (http://felixcloutier.com/x86/BZHI.html) takes the index from the
low 8 bits of src2 and saturates it to OperandSize, copying src1 unmodified for
any index >= 32.  (The manual's text Description says it saturates to
OperandSize-1; I tested on Skylake, and the Operation section is correct:
(index & 0xFF) >= 32 leaves 0xFFFFFFFF unmodified.)
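
For reference, a minimal C model of that behaviour (my own sketch; the name
bzhi32_model is hypothetical, following the manual's Operation section):

#include <stdint.h>

// Reference model of 32-bit BZHI: the index comes from the low 8 bits of
// src2, and any index >= 32 leaves src1 unmodified (saturation).
static uint32_t bzhi32_model(uint32_t src1, uint32_t src2) {
    unsigned index = src2 & 0xff;
    if (index >= 32)
        return src1;                    // keep all bits
    return src1 & ((1u << index) - 1);  // keep the low `index` bits
}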
For another case:

// a more likely use-case: avoiding C shift UB for c from 1 to 32
unsigned bzhi_1_to_32(unsigned x, unsigned c) {
    if (c != 32) {
        x &= ((1U << c) - 1);
    }
    return x;
}
        cmpl    $32, %esi
        je      .LBB2_2
        bzhil   %esi, %edi, %edi
.LBB2_2:
        movl    %edi, %eax
        retq
But we can also compile that to just BZHI: c > 32 is shift UB in the C source,
so we can produce whatever we want for those counts, even if it differs from
the behaviour of the no-BMI2 asm output.
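In other words, presumably the same two-instruction sequence as before:

        bzhil   %esi, %edi, %eax
        retq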
There are two other ways to express BZHI which clang fails to peephole at all:
unsigned bzhi2(unsigned x, unsigned c) {
    // c &= 0xff;
    // if (c < 32) {
    x &= (0xFFFFFFFFUL >> (32 - c));
    // }
    return x;
}
        movl    $32, %eax
        subl    %esi, %eax
        movl    $4294967295, %ecx       # imm = 0xFFFFFFFF
        shrxq   %rax, %rcx, %rax
        andl    %edi, %eax
        retq
unsigned bzhi3(unsigned long x, unsigned c) {
    c &= 0xff;
    return x & ~(-1U << c);
}
        movl    $-1, %eax
        shlxl   %esi, %eax, %eax
        andnl   %edi, %eax, %eax
        retq
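
Both of these could presumably compile to the same single BZHI as the first
example: in bzhi2, any c > 32 makes the shift count wrap out of range (shift
UB), and in bzhi3, c in 32..255 hits shift UB, so BZHI's saturating behaviour
is a legal outcome for every count:

        bzhil   %esi, %edi, %eax
        retq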
Similarly, this also fails to peephole to BZHI, because of the 1ULL:
unsigned bzhi_0_to_63(unsigned x, unsigned c) {
    return x & ((1ULL << c) - 1);
}
        movl    $1, %eax        # $-1, %rax would allow ANDN like bzhi3
        shlxq   %rsi, %rax, %rax
        addl    $-1, %eax
        andl    %edi, %eax
        retq
For the range of shift counts that have defined behaviour in C (0 to 63), this
function gives the same result as BZHI, so it's still a valid optimization.
(And a useful one, because it's a convenient way to avoid UB if you want a
count of 32 to result in no masking.)
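
And a quick exhaustive check of that equivalence (a sketch assuming
bzhi_0_to_63 above and the hypothetical bzhi32_model from earlier are in
scope):

#include <assert.h>

int main(void) {
    // Counts 32..63 produce a mask with all low 32 bits set, i.e. no
    // masking of a 32-bit value, exactly like BZHI's index saturation.
    for (unsigned c = 0; c < 64; c++)
        assert(bzhi_0_to_63(0xDEADBEEFu, c) == bzhi32_model(0xDEADBEEFu, c));
    return 0;
}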