[llvm-bugs] [Bug 46277] New: [X86][SSE] Failure to SimplifyDemandedElts through ADDSUB intrinsics

via llvm-bugs llvm-bugs at lists.llvm.org
Thu Jun 11 04:14:09 PDT 2020


https://bugs.llvm.org/show_bug.cgi?id=46277

            Bug ID: 46277
           Summary: [X86][SSE] Failure to SimplifyDemandedElts through
                    ADDSUB intrinsics
           Product: libraries
           Version: trunk
          Hardware: PC
                OS: Windows NT
            Status: NEW
          Severity: enhancement
          Priority: P
         Component: Backend: X86
          Assignee: unassignedbugs at nondot.org
          Reporter: llvm-dev at redking.me.uk
                CC: craig.topper at gmail.com, llvm-bugs at lists.llvm.org,
                    llvm-dev at redking.me.uk, spatel+llvm at rotateright.com

https://godbolt.org/z/KPaVBx

We fail to remove undemanded elements through the addsub intrinsics. This
most likely needs to be handled in InstCombiner::SimplifyDemandedVectorElts,
but we might still need it in
X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode as well: we also
create X86ISD::ADDSUB on the fly from the SHUFFLE(FADD,FSUB) pattern, which
means that hasOneUse tests may already have failed before that combine runs.

#include <x86intrin.h>

void test_fadd(float x, float y, float z, float w, __m128 right, float *out) {
 __m128 left = _mm_setr_ps(x, y, z, w);
 __m128 result = _mm_add_ps(left, right);
 *out++ = _mm_cvtss_f32(result);
 *out++ = _mm_cvtss_f32(_mm_shuffle_ps(result, result, _MM_SHUFFLE(1,1,1,1)));
}
void test_faddsub(float x, float y, float z, float w, __m128 right, float *out)
{
 __m128 left = _mm_setr_ps(x, y, z, w);
 __m128 result = _mm_addsub_ps(left, right);
 *out++ = _mm_cvtss_f32(result);
 *out++ = _mm_cvtss_f32(_mm_shuffle_ps(result, result, _MM_SHUFFLE(1,1,1,1)));
}

test_fadd:
  vinsertps $16, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
  vaddps %xmm4, %xmm0, %xmm0
  vmovss %xmm0, (%rdi)
  vextractps $1, %xmm0, 4(%rdi)
  retq
test_faddsub:
  vinsertps $16, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
  vinsertps $32, %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
  vinsertps $48, %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm3[0]
  vaddsubps %xmm4, %xmm0, %xmm0
  vmovss %xmm0, (%rdi)
  vextractps $1, %xmm0, 4(%rdi)
  retq
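
For comparison, a hand-simplified variant of test_faddsub (illustrative only,
not part of the testcase) that makes the undemanded lanes explicit: only
lanes 0 and 1 of the result are ever extracted, so the values of z and w can
never matter.

void test_faddsub_low2(float x, float y, __m128 right, float *out) {
 __m128 left = _mm_setr_ps(x, y, 0.0f, 0.0f); /* lanes 2,3 are don't-care */
 __m128 result = _mm_addsub_ps(left, right);
 *out++ = _mm_cvtss_f32(result);
 *out++ = _mm_cvtss_f32(_mm_shuffle_ps(result, result, _MM_SHUFFLE(1,1,1,1)));
}

Ideally test_faddsub would codegen like test_fadd above, with a single
vinsertps feeding the vaddsubps.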

We also fail to fold the two scalar stores into a single movsd/movlps.
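
For example (illustrative only), the two stores write the low 64 bits of the
result and could be expressed by hand as a single low-half store, which maps
to movlps:

_mm_storel_pi((__m64 *)out, result); /* stores result lanes 0 and 1 */
out += 2;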
