[llvm-bugs] [Bug 39859] New: Suboptimal int vector ugt setcc lowering

via llvm-bugs llvm-bugs at lists.llvm.org
Sun Dec 2 02:39:28 PST 2018


https://bugs.llvm.org/show_bug.cgi?id=39859

            Bug ID: 39859
           Summary: Suboptimal int vector ugt setcc lowering
           Product: libraries
           Version: trunk
          Hardware: PC
                OS: Windows NT
            Status: NEW
          Severity: enhancement
          Priority: P
         Component: Backend: X86
          Assignee: unassignedbugs at nondot.org
          Reporter: nikita.ppv at gmail.com
                CC: craig.topper at gmail.com, llvm-bugs at lists.llvm.org,
                    llvm-dev at redking.me.uk, spatel+llvm at rotateright.com

From https://github.com/rust-lang/rust/issues/56421, original code:

use std::arch::x86_64::*;

pub unsafe fn foo(x: __m128i, y: __m128i) -> __m128i {
    // Per 16-bit lane: cmp is all-ones iff x >= 0x100 (unsigned), i.e. x ugt 255;
    // blendv then picks y where cmp is set, x otherwise.
    let cmp = _mm_cmpeq_epi16(_mm_max_epu16(x, _mm_set1_epi16(0x100)), x);
    _mm_blendv_epi8(x, y, cmp)
}

LLVM IR:

define void @foo(<2 x i64>* noalias nocapture sret dereferenceable(16), <2 x i64>* noalias nocapture readonly dereferenceable(16) %x, <2 x i64>* noalias nocapture readonly dereferenceable(16) %y) unnamed_addr {
start:
  %1 = load <2 x i64>, <2 x i64>* %x, align 16
  %2 = bitcast <2 x i64> %1 to <8 x i16>
  %3 = icmp ugt <8 x i16> %2, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
  %4 = sext <8 x i1> %3 to <8 x i16>
  %5 = bitcast <2 x i64>* %y to <16 x i8>*
  %6 = load <16 x i8>, <16 x i8>* %5, align 16
  %7 = bitcast <2 x i64> %1 to <16 x i8>
  %8 = bitcast <8 x i16> %4 to <16 x i8>
  %9 = tail call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %7, <16 x i8> %6, <16 x i8> %8)
  %10 = bitcast <2 x i64>* %0 to <16 x i8>*
  store <16 x i8> %9, <16 x i8>* %10, align 16
  ret void
}

declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)

llc -mcpu=haswell gives:

        vmovdqa (%rsi), %xmm0
        vpminuw .LCPI0_0(%rip), %xmm0, %xmm1   # .LCPI0_0 is vector of 255s
        vpcmpeqw        %xmm1, %xmm0, %xmm1
        vpcmpeqd        %xmm2, %xmm2, %xmm2
        vpxor   %xmm2, %xmm1, %xmm1
        vpblendvb       %xmm1, (%rdx), %xmm0, %xmm0
        movq    %rdi, %rax
        vmovdqa %xmm0, (%rdi)
        retq

LowerVSETCC() lowers the ugt setcc as an inverted ule setcc (umin+pcmpeq).

However, in this case it would be better to adjust the constant setcc operand
by one and lower the resulting uge setcc into umax+pcmpeq instead, saving the
invert (the vpcmpeqd/vpxor pair above) and matching what the original code
intended to do.
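
For illustration (not from the original report), a minimal scalar sketch of
the identity involved, checked exhaustively over u16: x ugt 255 is equivalent
both to the inverted umin-based form currently emitted and to the umax-based
form with the constant bumped by one to 256.

fn main() {
    for x in 0u16..=u16::MAX {
        let ugt = x > 255;                     // icmp ugt x, 255
        let inverted_ule = !(x.min(255) == x); // current lowering: invert of umin+pcmpeq
        let uge = x.max(256) == x;             // suggested lowering: umax+pcmpeq, no invert
        assert_eq!(ugt, inverted_ule);
        assert_eq!(ugt, uge);
    }
}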
