[LLVMbugs] [Bug 10711] New: [AVX] integer math ops are scalarized

bugzilla-daemon at llvm.org bugzilla-daemon at llvm.org
Sat Aug 20 04:45:20 PDT 2011


http://llvm.org/bugs/show_bug.cgi?id=10711

           Summary: [AVX] integer math ops are scalarized
           Product: new-bugs
           Version: trunk
          Platform: PC
        OS/Version: All
            Status: NEW
          Severity: normal
          Priority: P
         Component: new bugs
        AssignedTo: unassignedbugs at nondot.org
        ReportedBy: matt at pharr.org
                CC: llvmbugs at cs.uiuc.edu


Given:

define <8 x i32> @foo(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
  %x = add <8 x i32> %i, %j
  ret <8 x i32> %x
}

llc -mattr=+avx extracts each element of the vectors and does a scalar add for
each lane (code below).  It would be nice for it to instead use a pair of
128-bit integer adds (paddd / VEX-encoded vpaddd), one per 128-bit half of the
256-bit vectors.


% llc -mattr=+avx -o - bug.ll
    .section    __TEXT,__text,regular,pure_instructions
    .globl    _foo
    .align    4, 0x90
_foo:                                   ## @foo
## BB#0:
    vextractf128    $1, %ymm1, %xmm2
    vpextrd    $2, %xmm2, %eax
    vextractf128    $1, %ymm0, %xmm3
    vpextrd    $2, %xmm3, %ecx
    vmovd    %xmm2, %esi
    vmovd    %xmm3, %edx
    addl    %esi, %edx
    addl    %eax, %ecx
    vpextrd    $3, %xmm2, %r8d
    vpextrd    $3, %xmm3, %esi
    vpextrd    $3, %xmm1, %edi
    vpextrd    $3, %xmm0, %eax
    addl    %edi, %eax
    vmovd    %ecx, %xmm4
    vmovd    %edx, %xmm5
    addl    %r8d, %esi
    vmovd    %esi, %xmm6
    vpextrd    $1, %xmm2, %edx
    vpextrd    $1, %xmm3, %ecx
    addl    %edx, %ecx
    vmovd    %ecx, %xmm2
    vpunpckldq    %xmm6, %xmm2, %xmm2
    vpunpckldq    %xmm4, %xmm5, %xmm4
    vmovd    %eax, %xmm3
    vpextrd    $1, %xmm1, %ecx
    vpextrd    $1, %xmm0, %eax
    addl    %ecx, %eax
    vmovd    %eax, %xmm5
    vpextrd    $2, %xmm1, %ecx
    vpextrd    $2, %xmm0, %eax
    vpunpckldq    %xmm2, %xmm4, %xmm2
    vpunpckldq    %xmm3, %xmm5, %xmm3
    addl    %ecx, %eax
    vmovd    %eax, %xmm4
    vmovd    %xmm1, %ecx
    vmovd    %xmm0, %eax
    addl    %ecx, %eax
    vmovd    %eax, %xmm0
    vpunpckldq    %xmm4, %xmm0, %xmm0
    vpunpckldq    %xmm3, %xmm0, %xmm0
    vinsertf128    $1, %xmm2, %ymm0, %ymm0
    ret

-- 
Configure bugmail: http://llvm.org/bugs/userprefs.cgi?tab=email
------- You are receiving this mail because: -------
You are on the CC list for the bug.



More information about the llvm-bugs mailing list