[llvm-commits] [llvm] r172708 - in /llvm/trunk: lib/CodeGen/SelectionDAG/DAGCombiner.cpp lib/CodeGen/SelectionDAG/SelectionDAG.cpp lib/Target/X86/X86ISelLowering.cpp test/CodeGen/X86/avx-sext.ll test/CodeGen/X86/avx2-conversions.ll

Elena Demikhovsky elena.demikhovsky at intel.com
Thu Jan 17 01:59:53 PST 2013


Author: delena
Date: Thu Jan 17 03:59:53 2013
New Revision: 172708

URL: http://llvm.org/viewvc/llvm-project?rev=172708&view=rev
Log:
Optimization for the following SIGN_EXTEND pairs:
v8i8  -> v8i64, 
v8i8  -> v8i32, 
v4i8  -> v4i64, 
v4i16 -> v4i64 
for AVX and AVX2.

Bug 14865.
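
As an illustration of one of these pairs (a minimal sketch, not taken from the
commit; the function name is made up), the affected pattern in IR form is:

  define <4 x i64> @sext_v4i8_to_v4i64(<4 x i8> %x) {
    %r = sext <4 x i8> %x to <4 x i64>
    ret <4 x i64> %r
  }

With this change the wide extension is split into steps that map directly to
vpmovsx instructions, so on AVX the example above is selected as vpmovsxbd
followed by two vpmovsxdq (see the avx-sext.ll tests below) instead of a
longer expansion.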

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/avx-sext.ll
    llvm/trunk/test/CodeGen/X86/avx2-conversions.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=172708&r1=172707&r2=172708&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Thu Jan 17 03:59:53 2013
@@ -4298,11 +4298,19 @@
   if (isa<ConstantSDNode>(N0))
     return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N0);
 
-  // fold (sext (sext x)) -> (sext x)
-  // fold (sext (aext x)) -> (sext x)
-  if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
-    return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT,
-                       N0.getOperand(0));
+  // Folding (sext (sext x)) is obvious, but we do it only after the type
+  // legalization phase. When the sequence is like {(T1->T2), (T2->T3)} and
+  // T1 or T3 (or both) are illegal types, the TypeLegalizer may not produce
+  // a good sequence for the (T1->T3) pair. So we give the target-specific
+  // combiner a chance to optimize T1->T2 and T2->T3 separately, and possibly
+  // fold them into a preceding or subsequent instruction.
+  if (Level >= AfterLegalizeTypes) {
+    // fold (sext (sext x)) -> (sext x)
+    // fold (sext (aext x)) -> (sext x)
+    if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND)
+      return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT,
+                         N0.getOperand(0));
+  }
 
   if (N0.getOpcode() == ISD::TRUNCATE) {
     // fold (sext (truncate (load x))) -> (sext (smaller load x))
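
To make the new comment concrete with a {(T1->T2), (T2->T3)} instance (a
hand-written IR sketch; the function name is made up): take T1 = v4i8,
T2 = v4i32 and T3 = v4i64 on AVX without AVX2, where v4i8 is illegal and a
256-bit v4i64 result has no single vpmovsx. Folding the pair into one
v4i8 -> v4i64 sign extend before type legalization may leave the legalizer to
emit a worse sequence, while keeping the two steps lets each be matched to a
vpmovsx instruction:

  define <4 x i64> @two_step_sext(<4 x i8> %x) {
    %t = sext <4 x i8> %x to <4 x i32>    ; vpmovsxbd
    %r = sext <4 x i32> %t to <4 x i64>   ; two vpmovsxdq, one per half
    ret <4 x i64> %r
  }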

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=172708&r1=172707&r2=172708&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Thu Jan 17 03:59:53 2013
@@ -2554,9 +2554,7 @@
             VT.getVectorNumElements() ==
             Operand.getValueType().getVectorNumElements()) &&
            "Vector element count mismatch!");
-    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
-      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
-    else if (OpOpcode == ISD::UNDEF)
+    if (OpOpcode == ISD::UNDEF)
       // sext(undef) = 0, because the top bits will all be the same.
       return getConstant(0, VT);
     break;

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=172708&r1=172707&r2=172708&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Jan 17 03:59:53 2013
@@ -16970,14 +16970,37 @@
 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
+  EVT VT = N->getValueType(0);
+  
+  if (!VT.isVector())
+    return SDValue();
+
+  SDValue In = N->getOperand(0);
+  EVT InVT = In.getValueType();
+  DebugLoc dl = N->getDebugLoc();
+  unsigned ExtenedEltSize = VT.getVectorElementType().getSizeInBits(); 
+
+  // Split the SIGN_EXTEND operation to use vpmovsx instructions when possible.
+  if (InVT == MVT::v8i8) {
+    if (ExtenedEltSize > 16 && !Subtarget->hasInt256())
+      In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, In);
+    if (ExtenedEltSize > 32)
+      In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i32, In);
+    return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, In);
+  }
+
+  if ((InVT == MVT::v4i8 || InVT == MVT::v4i16) &&
+      ExtenedEltSize > 32 && !Subtarget->hasInt256()) {
+    In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
+    return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, In);
+  }
   if (!DCI.isBeforeLegalizeOps())
     return SDValue();
 
   if (!Subtarget->hasFp256())
     return SDValue();
 
-  EVT VT = N->getValueType(0);
-  if (VT.isVector() && VT.getSizeInBits() == 256) {
+  if (VT.is256BitVector()) {
     SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
     if (R.getNode())
       return R;
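
The effect of the new code in PerformSExtCombine can be written as the IR it
is equivalent to at the DAG level (a sketch; the function name is made up).
For a v8i8 source, the sign extend is first widened to v8i16 when the
destination elements are wider than 16 bits and AVX2 is unavailable, then to
v8i32 when they are wider than 32 bits, and finally to the requested type.
For v8i8 -> v8i32 on AVX this gives:

  define <8 x i32> @split_v8i8_to_v8i32(<8 x i8> %x) {
    %w = sext <8 x i8> %x to <8 x i16>    ; vpmovsxbw
    %d = sext <8 x i16> %w to <8 x i32>   ; vpmovsxwd on each 128-bit half
    ret <8 x i32> %d
  }

With AVX2 the v8i16 step is skipped; v8i8 -> v8i64, for example, becomes a
256-bit vpmovsxbd followed by two vpmovsxdq, as checked in
avx2-conversions.ll below.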

Modified: llvm/trunk/test/CodeGen/X86/avx-sext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-sext.ll?rev=172708&r1=172707&r2=172708&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-sext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-sext.ll Thu Jan 17 03:59:53 2013
@@ -142,3 +142,71 @@
  %Y = sext <8 x i8> %X to <8 x i16>
  ret <8 x i16>%Y
 }
+; AVX: sext_1
+; AVX: vpmovsxbd
+; AVX: vpmovsxdq
+; AVX: vpmovsxdq
+; AVX: ret
+define void @sext_1(<4 x i8>* %inbuf, <4 x i64>* %outbuf) {
+  %v0 = load <4 x i8>* %inbuf
+  %r = sext <4 x i8> %v0 to <4 x i64>                                         
+  store <4 x i64> %r, <4 x i64>* %outbuf                                         
+  ret void                                                               
+}
+
+; AVX: sext_2
+; AVX: vpmovsxbd
+; AVX: ret
+define void @sext_2(<4 x i8>* %inbuf, <4 x i32>* %outbuf) {
+  %v0 = load <4 x i8>* %inbuf
+  %r = sext <4 x i8> %v0 to <4 x i32>                                         
+  store <4 x i32> %r, <4 x i32>* %outbuf                                         
+  ret void                                                               
+}
+  
+; AVX: sext_3
+; AVX: vpmovsxwd
+; AVX: ret
+define void @sext_3(<4 x i16>* %inbuf, <4 x i32>* %outbuf) {
+  %v0 = load <4 x i16>* %inbuf
+  %r = sext <4 x i16> %v0 to <4 x i32>                                         
+  store <4 x i32> %r, <4 x i32>* %outbuf                                         
+  ret void                                                               
+}
+  
+; AVX: sext_4
+; AVX: vpmovsxwd
+; AVX: vpmovsxdq
+; AVX: vpmovsxdq
+; AVX: ret
+define void @sext_4(<4 x i16>* %inbuf, <4 x i64>* %outbuf) {
+  %v0 = load <4 x i16>* %inbuf
+  %r = sext <4 x i16> %v0 to <4 x i64>                                         
+  store <4 x i64> %r, <4 x i64>* %outbuf                                         
+  ret void                                                               
+}
+
+; AVX: sext_5
+; AVX: vpmovsxbw
+; AVX: vpmovsxwd
+; AVX: vpmovsxwd
+; AVX: vpmovsxdq
+; AVX: ret
+define void @sext_5(<8 x i8>* %inbuf, <8 x i64>* %outbuf) {
+  %v0 = load <8 x i8>* %inbuf
+  %r = sext <8 x i8> %v0 to <8 x i64>                                         
+  store <8 x i64> %r, <8 x i64>* %outbuf                                         
+  ret void                                                               
+}
+; AVX: sext_6
+; AVX: vpmovsxbw
+; AVX: vpmovsxwd
+; AVX: vpmovsxwd
+; AVX: ret
+define void @sext_6(<8 x i8>* %inbuf, <8 x i32>* %outbuf) {
+  %v0 = load <8 x i8>* %inbuf
+  %r = sext <8 x i8> %v0 to <8 x i32>                                         
+  store <8 x i32> %r, <8 x i32>* %outbuf                                         
+  ret void                                                               
+}
+

Modified: llvm/trunk/test/CodeGen/X86/avx2-conversions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-conversions.ll?rev=172708&r1=172707&r2=172708&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-conversions.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-conversions.ll Thu Jan 17 03:59:53 2013
@@ -107,3 +107,15 @@
  %Y = sext <8 x i8> %X to <8 x i32>
  ret <8 x i32>%Y
 }
+
+; CHECK: load_sext_test6
+; CHECK: vpmovsxbd (%r{{[^,]*}}), %ymm{{.*}}
+; CHECK: vpmovsxdq
+; CHECK: vpmovsxdq
+; CHECK: ret 
+define <8 x i64> @load_sext_test6(<8 x i8> *%ptr) {
+ %X = load <8 x i8>* %ptr
+ %Y = sext <8 x i8> %X to <8 x i64>
+ ret <8 x i64>%Y
+}
+
