[llvm] r309614 - [X86][MMX] Added custom lowering action for MMX SELECT (PR30418)

Hans Wennborg via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 10 14:25:49 PDT 2017


Should we merge this to 5.0?

On Mon, Jul 31, 2017 at 1:11 PM, Konstantin Belochapka via
llvm-commits <llvm-commits at lists.llvm.org> wrote:
> Author: kbelochapka
> Date: Mon Jul 31 13:11:49 2017
> New Revision: 309614
>
> URL: http://llvm.org/viewvc/llvm-project?rev=309614&view=rev
> Log:
> [X86][MMX] Added custom lowering action for MMX SELECT (PR30418)
> Fix for pr30418 - error in backend: Cannot select: t17: x86mmx = select_cc t2, Constant:i64<0>, t7, t8, seteq:ch
> Differential Revision: https://reviews.llvm.org/D34661
>
>
>
>
> Added:
>     llvm/trunk/test/CodeGen/X86/select-mmx.ll
> Modified:
>     llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
>
> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=309614&r1=309613&r2=309614&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Jul 31 13:11:49 2017
> @@ -419,6 +419,11 @@ X86TargetLowering::X86TargetLowering(con
>      setOperationAction(ISD::SELECT, VT, Custom);
>      setOperationAction(ISD::SETCC,  VT, Custom);
>    }
> +
> +  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
> +  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
> +  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
> +
>    setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
>    // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
>    // SjLj exception handling but a light-weight setjmp/longjmp replacement to
> @@ -30631,6 +30636,14 @@ static SDValue combineSelect(SDNode *N,
>        return SDValue(N, 0);
>    }
>
> +  // Custom action for SELECT MMX
> +  if (VT == MVT::x86mmx) {
> +    LHS = DAG.getBitcast(MVT::i64, LHS);
> +    RHS = DAG.getBitcast(MVT::i64, RHS);
> +    SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
> +    return DAG.getBitcast(VT, newSelect);
> +  }
> +
>    return SDValue();
>  }
>
>
> Added: llvm/trunk/test/CodeGen/X86/select-mmx.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/select-mmx.ll?rev=309614&view=auto
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/select-mmx.ll (added)
> +++ llvm/trunk/test/CodeGen/X86/select-mmx.ll Mon Jul 31 13:11:49 2017
> @@ -0,0 +1,120 @@
> +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
> +; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=X64
> +; RUN: llc -mtriple=i686-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=I32
> +
> +
> +; From source: clang -O2
> +;__m64 test47(int a)
> +;{
> +;    __m64 x = (a)? (__m64)(7): (__m64)(0);
> +; return __builtin_ia32_psllw(x, x);
> +;}
> +
> +define i64 @test47(i64 %arg)  {
> +;
> +; X64-LABEL: test47:
> +; X64:       # BB#0:
> +; X64-NEXT:    xorl %eax, %eax
> +; X64-NEXT:    testq %rdi, %rdi
> +; X64-NEXT:    movl $7, %ecx
> +; X64-NEXT:    cmoveq %rcx, %rax
> +; X64-NEXT:    movd %rax, %mm0
> +; X64-NEXT:    psllw %mm0, %mm0
> +; X64-NEXT:    movd %mm0, %rax
> +; X64-NEXT:    retq
> +;
> +; I32-LABEL: test47:
> +; I32:       # BB#0:
> +; I32-NEXT:    pushl %ebp
> +; I32-NEXT:  .Lcfi0:
> +; I32-NEXT:    .cfi_def_cfa_offset 8
> +; I32-NEXT:  .Lcfi1:
> +; I32-NEXT:    .cfi_offset %ebp, -8
> +; I32-NEXT:    movl %esp, %ebp
> +; I32-NEXT:  .Lcfi2:
> +; I32-NEXT:    .cfi_def_cfa_register %ebp
> +; I32-NEXT:    andl $-8, %esp
> +; I32-NEXT:    subl $16, %esp
> +; I32-NEXT:    movl 8(%ebp), %eax
> +; I32-NEXT:    orl 12(%ebp), %eax
> +; I32-NEXT:    movl $7, %eax
> +; I32-NEXT:    je .LBB0_2
> +; I32-NEXT:  # BB#1:
> +; I32-NEXT:    xorl %eax, %eax
> +; I32-NEXT:  .LBB0_2:
> +; I32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
> +; I32-NEXT:    movl $0, {{[0-9]+}}(%esp)
> +; I32-NEXT:    movq {{[0-9]+}}(%esp), %mm0
> +; I32-NEXT:    psllw %mm0, %mm0
> +; I32-NEXT:    movq %mm0, (%esp)
> +; I32-NEXT:    movl (%esp), %eax
> +; I32-NEXT:    movl {{[0-9]+}}(%esp), %edx
> +; I32-NEXT:    movl %ebp, %esp
> +; I32-NEXT:    popl %ebp
> +; I32-NEXT:    retl
> +  %cond = icmp eq i64 %arg, 0
> +  %slct = select i1 %cond, x86_mmx bitcast (i64 7 to x86_mmx), x86_mmx bitcast (i64 0 to x86_mmx)
> +  %psll = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %slct, x86_mmx %slct)
> +  %retc = bitcast x86_mmx %psll to i64
> +  ret i64 %retc
> +}
> +
> +
> +; From source: clang -O2
> +;__m64 test49(int a, long long n, long long m)
> +;{
> +;    __m64 x = (a)? (__m64)(n): (__m64)(m);
> +; return __builtin_ia32_psllw(x, x);
> +;}
> +
> +define i64 @test49(i64 %arg, i64 %x, i64 %y) {
> +;
> +; X64-LABEL: test49:
> +; X64:       # BB#0:
> +; X64-NEXT:    testq %rdi, %rdi
> +; X64-NEXT:    cmovneq %rdx, %rsi
> +; X64-NEXT:    movd %rsi, %mm0
> +; X64-NEXT:    psllw %mm0, %mm0
> +; X64-NEXT:    movd %mm0, %rax
> +; X64-NEXT:    retq
> +;
> +; I32-LABEL: test49:
> +; I32:       # BB#0:
> +; I32-NEXT:    pushl %ebp
> +; I32-NEXT:  .Lcfi3:
> +; I32-NEXT:    .cfi_def_cfa_offset 8
> +; I32-NEXT:  .Lcfi4:
> +; I32-NEXT:    .cfi_offset %ebp, -8
> +; I32-NEXT:    movl %esp, %ebp
> +; I32-NEXT:  .Lcfi5:
> +; I32-NEXT:    .cfi_def_cfa_register %ebp
> +; I32-NEXT:    andl $-8, %esp
> +; I32-NEXT:    subl $8, %esp
> +; I32-NEXT:    movl 8(%ebp), %eax
> +; I32-NEXT:    orl 12(%ebp), %eax
> +; I32-NEXT:    je .LBB1_1
> +; I32-NEXT:  # BB#2:
> +; I32-NEXT:    leal 24(%ebp), %eax
> +; I32-NEXT:    jmp .LBB1_3
> +; I32-NEXT:  .LBB1_1:
> +; I32-NEXT:    leal 16(%ebp), %eax
> +; I32-NEXT:  .LBB1_3:
> +; I32-NEXT:    movq (%eax), %mm0
> +; I32-NEXT:    psllw %mm0, %mm0
> +; I32-NEXT:    movq %mm0, (%esp)
> +; I32-NEXT:    movl (%esp), %eax
> +; I32-NEXT:    movl {{[0-9]+}}(%esp), %edx
> +; I32-NEXT:    movl %ebp, %esp
> +; I32-NEXT:    popl %ebp
> +; I32-NEXT:    retl
> +  %cond = icmp eq i64 %arg, 0
> +  %xmmx = bitcast i64 %x to x86_mmx
> +  %ymmx = bitcast i64 %y to x86_mmx
> +  %slct = select i1 %cond, x86_mmx %xmmx, x86_mmx %ymmx
> +  %psll = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %slct, x86_mmx %slct)
> +  %retc = bitcast x86_mmx %psll to i64
> +  ret i64 %retc
> +}
> +
> +declare x86_mmx @llvm.x86.mmx.psll.w(x86_mmx, x86_mmx)
> +
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at lists.llvm.org
> http://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits


More information about the llvm-commits mailing list