[llvm] r295652 - [SelectionDAG] Add scalarization support for ISD::*_EXTEND_VECTOR_INREG opcodes.

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Mon Feb 20 03:55:59 PST 2017


Author: rksimon
Date: Mon Feb 20 05:55:58 2017
New Revision: 295652

URL: http://llvm.org/viewvc/llvm-project?rev=295652&view=rev
Log:
[SelectionDAG] Add scalarization support for ISD::*_EXTEND_VECTOR_INREG opcodes.

Thanks to Mikael Holmén for the initial test case.
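As a rough sketch of what the new ScalarizeVecRes_VecInregOp handler below
does (types chosen for illustration, not taken from the patch): when the
result type has been legalized down to a single element, element 0 of the
source operand is extracted (or the operand's already-scalarized value is
reused) and the matching scalar extend is emitted in its place:

  v1i64 = sign_extend_vector_inreg v2i32:t0
    -->
  i64 = sign_extend (i32 = extract_vector_elt t0, Constant:i64<0>)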

Added:
    llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll
Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.h
    llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.h?rev=295652&r1=295651&r2=295652&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.h (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeTypes.h Mon Feb 20 05:55:58 2017
@@ -602,6 +602,7 @@ private:
   SDValue ScalarizeVecRes_TernaryOp(SDNode *N);
   SDValue ScalarizeVecRes_UnaryOp(SDNode *N);
   SDValue ScalarizeVecRes_InregOp(SDNode *N);
+  SDValue ScalarizeVecRes_VecInregOp(SDNode *N);
 
   SDValue ScalarizeVecRes_BITCAST(SDNode *N);
   SDValue ScalarizeVecRes_BUILD_VECTOR(SDNode *N);

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp?rev=295652&r1=295651&r2=295652&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp Mon Feb 20 05:55:58 2017
@@ -65,6 +65,11 @@ void DAGTypeLegalizer::ScalarizeVectorRe
   case ISD::SETCC:             R = ScalarizeVecRes_SETCC(N); break;
   case ISD::UNDEF:             R = ScalarizeVecRes_UNDEF(N); break;
   case ISD::VECTOR_SHUFFLE:    R = ScalarizeVecRes_VECTOR_SHUFFLE(N); break;
+  case ISD::ANY_EXTEND_VECTOR_INREG:
+  case ISD::SIGN_EXTEND_VECTOR_INREG:
+  case ISD::ZERO_EXTEND_VECTOR_INREG:
+    R = ScalarizeVecRes_VecInregOp(N);
+    break;
   case ISD::ANY_EXTEND:
   case ISD::BITREVERSE:
   case ISD::BSWAP:
@@ -258,6 +263,34 @@ SDValue DAGTypeLegalizer::ScalarizeVecRe
                      LHS, DAG.getValueType(ExtVT));
 }
 
+SDValue DAGTypeLegalizer::ScalarizeVecRes_VecInregOp(SDNode *N) {
+  SDLoc DL(N);
+  SDValue Op = N->getOperand(0);
+
+  EVT OpVT = Op.getValueType();
+  EVT OpEltVT = OpVT.getVectorElementType();
+  EVT EltVT = N->getValueType(0).getVectorElementType();
+
+  if (getTypeAction(OpVT) == TargetLowering::TypeScalarizeVector) {
+    Op = GetScalarizedVector(Op);
+  } else {
+    Op = DAG.getNode(
+        ISD::EXTRACT_VECTOR_ELT, DL, OpEltVT, Op,
+        DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
+  }
+
+  switch (N->getOpcode()) {
+  case ISD::ANY_EXTEND_VECTOR_INREG:
+    return DAG.getNode(ISD::ANY_EXTEND, DL, EltVT, Op);
+  case ISD::SIGN_EXTEND_VECTOR_INREG:
+    return DAG.getNode(ISD::SIGN_EXTEND, DL, EltVT, Op);
+  case ISD::ZERO_EXTEND_VECTOR_INREG:
+    return DAG.getNode(ISD::ZERO_EXTEND, DL, EltVT, Op);
+  }
+
+  llvm_unreachable("Illegal extend_vector_inreg opcode");
+}
+
 SDValue DAGTypeLegalizer::ScalarizeVecRes_SCALAR_TO_VECTOR(SDNode *N) {
   // If the operand is wider than the vector element type then it is implicitly
   // truncated.  Make that explicit here.

Added: llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll?rev=295652&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll (added)
+++ llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll Mon Feb 20 05:55:58 2017
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2   | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X64-SSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2   | FileCheck %s --check-prefix=ALL --check-prefix=X32-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=X64-AVX
+
+define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) nounwind {
+; X32-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
+; X32-SSE:       # BB#0:
+; X32-SSE-NEXT:    pushl %ebp
+; X32-SSE-NEXT:    movl %esp, %ebp
+; X32-SSE-NEXT:    andl $-128, %esp
+; X32-SSE-NEXT:    subl $384, %esp # imm = 0x180
+; X32-SSE-NEXT:    movl 88(%ebp), %ecx
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = mem[2,3,0,1]
+; X32-SSE-NEXT:    xorps %xmm1, %xmm1
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
+; X32-SSE-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    movaps %xmm1, (%esp)
+; X32-SSE-NEXT:    movdqa %xmm0, {{[0-9]+}}(%esp)
+; X32-SSE-NEXT:    leal (%ecx,%ecx), %eax
+; X32-SSE-NEXT:    andl $31, %eax
+; X32-SSE-NEXT:    movl 128(%esp,%eax,4), %eax
+; X32-SSE-NEXT:    leal 1(%ecx,%ecx), %ecx
+; X32-SSE-NEXT:    andl $31, %ecx
+; X32-SSE-NEXT:    movl (%esp,%ecx,4), %edx
+; X32-SSE-NEXT:    movl %ebp, %esp
+; X32-SSE-NEXT:    popl %ebp
+; X32-SSE-NEXT:    retl
+;
+; X64-SSE-LABEL: extract_any_extend_vector_inreg_v16i64:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    pushq %rbp
+; X64-SSE-NEXT:    movq %rsp, %rbp
+; X64-SSE-NEXT:    andq $-128, %rsp
+; X64-SSE-NEXT:    subq $256, %rsp # imm = 0x100
+; X64-SSE-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SSE-NEXT:    psrldq {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-SSE-NEXT:    xorps %xmm0, %xmm0
+; X64-SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
+; X64-SSE-NEXT:    movdqa %xmm7, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    andl $15, %edi
+; X64-SSE-NEXT:    movq (%rsp,%rdi,8), %rax
+; X64-SSE-NEXT:    movq %rbp, %rsp
+; X64-SSE-NEXT:    popq %rbp
+; X64-SSE-NEXT:    retq
+;
+; X32-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
+; X32-AVX:       # BB#0:
+; X32-AVX-NEXT:    pushl %ebp
+; X32-AVX-NEXT:    movl %esp, %ebp
+; X32-AVX-NEXT:    andl $-128, %esp
+; X32-AVX-NEXT:    subl $384, %esp # imm = 0x180
+; X32-AVX-NEXT:    movl 40(%ebp), %ecx
+; X32-AVX-NEXT:    vbroadcastsd 32(%ebp), %ymm0
+; X32-AVX-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X32-AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X32-AVX-NEXT:    vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT:    vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT:    vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT:    vmovapd %ymm0, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT:    vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT:    vmovapd %ymm1, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT:    vmovapd %ymm1, (%esp)
+; X32-AVX-NEXT:    vmovapd %ymm0, {{[0-9]+}}(%esp)
+; X32-AVX-NEXT:    leal (%ecx,%ecx), %eax
+; X32-AVX-NEXT:    andl $31, %eax
+; X32-AVX-NEXT:    movl 128(%esp,%eax,4), %eax
+; X32-AVX-NEXT:    leal 1(%ecx,%ecx), %ecx
+; X32-AVX-NEXT:    andl $31, %ecx
+; X32-AVX-NEXT:    movl (%esp,%ecx,4), %edx
+; X32-AVX-NEXT:    movl %ebp, %esp
+; X32-AVX-NEXT:    popl %ebp
+; X32-AVX-NEXT:    vzeroupper
+; X32-AVX-NEXT:    retl
+;
+; X64-AVX-LABEL: extract_any_extend_vector_inreg_v16i64:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    pushq %rbp
+; X64-AVX-NEXT:    movq %rsp, %rbp
+; X64-AVX-NEXT:    andq $-128, %rsp
+; X64-AVX-NEXT:    subq $256, %rsp # imm = 0x100
+; X64-AVX-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm3[3,1,2,3]
+; X64-AVX-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X64-AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; X64-AVX-NEXT:    vmovapd %ymm1, {{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    vmovapd %ymm1, {{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    vmovapd %ymm1, (%rsp)
+; X64-AVX-NEXT:    vmovapd %ymm0, {{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    andl $15, %edi
+; X64-AVX-NEXT:    movq (%rsp,%rdi,8), %rax
+; X64-AVX-NEXT:    movq %rbp, %rsp
+; X64-AVX-NEXT:    popq %rbp
+; X64-AVX-NEXT:    vzeroupper
+; X64-AVX-NEXT:    retq
+  %1 = extractelement <16 x i64> %a0, i32 15
+  %2 = insertelement <16 x i64> zeroinitializer, i64 %1, i32 4
+  %3 = extractelement <16 x i64> %2, i32 %a1
+  ret i64 %3
+}
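For reference, the IR above moves the top lane of a <16 x i64> into a zero
vector and then extracts a variable lane, which (per the function name)
exercises the ANY_EXTEND_VECTOR_INREG path; without the new handler, type
legalization would fail here with its "Do not know how to scalarize the
result of this operator!" error. Assuming a built llc is on PATH, the
autogenerated CHECK lines can be regenerated with:

  utils/update_llc_test_checks.py test/CodeGen/X86/vector-extend-inreg.ll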