[llvm] r287535 - [X86][SSE] Allow PACKSS to be used to truncate any type of all/none sign bits input

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 21 04:05:49 PST 2016


Author: rksimon
Date: Mon Nov 21 06:05:49 2016
New Revision: 287535

URL: http://llvm.org/viewvc/llvm-project?rev=287535&view=rev
Log:
[X86][SSE] Allow PACKSS to be used to truncate any type of all/none sign bits input

At the moment we only use truncateVectorCompareWithPACKSS on direct vector comparison results (just one example of an input with known all/none sign bits).

This change relaxes the direct matching of a SETCC opcode by instead querying SelectionDAG::ComputeNumSignBits, accepting any input whose sign bit is known to be splatted across every bit of the scalar element.
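
The core of the relaxed check, quoted from the X86ISelLowering.cpp hunk below: rather than pattern-matching a SETCC, the combine now asks the DAG how many known sign bits the input has, and only proceeds when every bit of each scalar element is a copy of the sign bit (i.e. each lane is known to be all-zeros or all-ones):

  // Use PACKSS if the input is a splatted sign bit.
  // e.g. Comparison result, sext_in_reg, etc.
  unsigned NumSignBits = DAG.ComputeNumSignBits(In);
  if (NumSignBits != InSVT.getSizeInBits())
    return SDValue();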

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/packss.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=287535&r1=287534&r2=287535&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Nov 21 06:05:49 2016
@@ -14664,8 +14664,8 @@ static SDValue truncateVectorCompareWith
                                                const SDLoc &DL,
                                                SelectionDAG &DAG,
                                                const X86Subtarget &Subtarget) {
-  // AVX512 has fast truncate.
-  if (Subtarget.hasAVX512())
+  // Requires SSE2 but AVX512 has fast truncate.
+  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
     return SDValue();
 
   EVT SrcVT = In.getValueType();
@@ -31020,22 +31020,20 @@ static SDValue combineVectorTruncation(S
     return SDValue();
 }
 
-/// This function transforms vector truncation of comparison results from
+/// This function transforms vector truncation of 'all or none' bits values.
 /// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS operations.
-static SDValue combineVectorCompareTruncation(SDNode *N, SDLoc &DL,
-                                              SelectionDAG &DAG,
-                                              const X86Subtarget &Subtarget) {
-  // AVX512 has fast truncate.
-  if (Subtarget.hasAVX512())
+static SDValue combineVectorSignBitsTruncation(SDNode *N, SDLoc &DL,
+                                               SelectionDAG &DAG,
+                                               const X86Subtarget &Subtarget) {
+  // Requires SSE2 but AVX512 has fast truncate.
+  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
     return SDValue();
 
   if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
     return SDValue();
 
-  // TODO: we should be able to support sources other than compares as long
-  // as we are saturating+packing zero/all bits only.
   SDValue In = N->getOperand(0);
-  if (In.getOpcode() != ISD::SETCC || !In.getValueType().isSimple())
+  if (!In.getValueType().isSimple())
     return SDValue();
 
   MVT VT = N->getValueType(0).getSimpleVT();
@@ -31044,9 +31042,11 @@ static SDValue combineVectorCompareTrunc
   MVT InVT = In.getValueType().getSimpleVT();
   MVT InSVT = InVT.getScalarType();
 
-  assert(DAG.getTargetLoweringInfo().getBooleanContents(InVT) ==
-             TargetLoweringBase::ZeroOrNegativeOneBooleanContent &&
-         "Expected comparison result to be zero/all bits");
+  // Use PACKSS if the input is a splatted sign bit.
+  // e.g. Comparison result, sext_in_reg, etc.
+  unsigned NumSignBits = DAG.ComputeNumSignBits(In);
+  if (NumSignBits != InSVT.getSizeInBits())
+    return SDValue();
 
   // Check we have a truncation suited for PACKSS.
   if (!VT.is128BitVector() && !VT.is256BitVector())
@@ -31077,8 +31077,8 @@ static SDValue combineTruncate(SDNode *N
       return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
   }
 
-  // Try to truncate vector comparison results with PACKSS.
-  if (SDValue V = combineVectorCompareTruncation(N, DL, DAG, Subtarget))
+  // Try to truncate extended sign bits with PACKSS.
+  if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
     return V;
 
   return combineVectorTruncation(N, DAG, Subtarget);
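
Why the PACKSS saturation is lossless here: a lane whose bits are all copies of the sign bit is either all-zeros or all-ones, and signed saturation maps both patterns to their truncated equivalents unchanged. A standalone C++ sketch of one saturating lane (illustrative only, not LLVM code; PackSSLane is a hypothetical helper mimicking one i32 -> i16 lane of PACKSSDW):

  #include <cstdint>

  // Signed-saturating truncation of one lane, as PACKSSDW does per element.
  static int16_t PackSSLane(int32_t Lane) {
    if (Lane > INT16_MAX)
      return INT16_MAX;
    if (Lane < INT16_MIN)
      return INT16_MIN;
    return static_cast<int16_t>(Lane);
  }

  int main() {
    // Sign-splatted lanes are either 0 or -1; both survive saturation
    // unchanged, so the all/none-bits value is preserved exactly.
    return (PackSSLane(0) == 0 && PackSSLane(-1) == int16_t(-1)) ? 0 : 1;
  }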

Modified: llvm/trunk/test/CodeGen/X86/packss.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/packss.ll?rev=287535&r1=287534&r2=287535&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/packss.ll (original)
+++ llvm/trunk/test/CodeGen/X86/packss.ll Mon Nov 21 06:05:49 2016
@@ -7,39 +7,39 @@
 define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
 ; X32-SSE-LABEL: trunc_ashr_v4i64:
 ; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    psrad $31, %xmm0
 ; X32-SSE-NEXT:    psrad $31, %xmm1
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE-NEXT:    psrad $31, %xmm0
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: trunc_ashr_v4i64:
 ; X64-SSE:       # BB#0:
-; X64-SSE-NEXT:    psrad $31, %xmm0
 ; X64-SSE-NEXT:    psrad $31, %xmm1
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X64-SSE-NEXT:    psrad $31, %xmm0
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: trunc_ashr_v4i64:
 ; X64-AVX1:       # BB#0:
-; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X64-AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: trunc_ashr_v4i64:
 ; X64-AVX2:       # BB#0:
 ; X64-AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; X64-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
-; X64-AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
   %1 = ashr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
@@ -50,44 +50,32 @@ define <4 x i32> @trunc_ashr_v4i64(<4 x
 define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
 ; X32-SSE-LABEL: trunc_ashr_v8i32:
 ; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    psrad $31, %xmm0
 ; X32-SSE-NEXT:    psrad $31, %xmm1
-; X32-SSE-NEXT:    pslld $16, %xmm1
-; X32-SSE-NEXT:    psrad $16, %xmm1
-; X32-SSE-NEXT:    pslld $16, %xmm0
-; X32-SSE-NEXT:    psrad $16, %xmm0
-; X32-SSE-NEXT:    packssdw %xmm1, %xmm0
+; X32-SSE-NEXT:    psrad $31, %xmm0
+; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
 ; X32-SSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: trunc_ashr_v8i32:
 ; X64-SSE:       # BB#0:
-; X64-SSE-NEXT:    psrad $31, %xmm0
 ; X64-SSE-NEXT:    psrad $31, %xmm1
-; X64-SSE-NEXT:    pslld $16, %xmm1
-; X64-SSE-NEXT:    psrad $16, %xmm1
-; X64-SSE-NEXT:    pslld $16, %xmm0
-; X64-SSE-NEXT:    psrad $16, %xmm0
-; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
+; X64-SSE-NEXT:    psrad $31, %xmm0
+; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: trunc_ashr_v8i32:
 ; X64-AVX1:       # BB#0:
-; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; X64-AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; X64-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vzeroupper
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: trunc_ashr_v8i32:
 ; X64-AVX2:       # BB#0:
 ; X64-AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; X64-AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vzeroupper
 ; X64-AVX2-NEXT:    retq
   %1 = ashr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
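
To reproduce the new codegen locally, running the updated test through llc should be enough; assuming the file's usual RUN-line setup (the RUN lines fall outside these hunks), something like:

  llc -mtriple=x86_64-unknown-unknown -mattr=+avx2 < test/CodeGen/X86/packss.ll

should now print the X64-AVX2 sequences above, ending in vpacksswb instead of the old vpshufd/vpshufb + vpermq shuffle chains.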



