[llvm-commits] [llvm] r149485 - in /llvm/trunk: lib/Target/X86/X86ISelLowering.cpp lib/Target/X86/X86ISelLowering.h test/CodeGen/X86/avx-trunc.ll

Chad Rosier mcrosier at apple.com
Wed Feb 1 01:26:55 PST 2012


Hi Elena,
Minor nit-picks below.

On Jan 31, 2012, at 11:56 PM, Elena Demikhovsky wrote:

> Author: delena
> Date: Wed Feb  1 01:56:44 2012
> New Revision: 149485
> 
> URL: http://llvm.org/viewvc/llvm-project?rev=149485&view=rev
> Log:
> Optimization for "truncate" operation on AVX.
> Truncating v4i64 -> v4i32 and v8i32 -> v8i16 may be done with set of shuffles.
> 
> Added:
>    llvm/trunk/test/CodeGen/X86/avx-trunc.ll   (with props)
> Modified:
>    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
>    llvm/trunk/lib/Target/X86/X86ISelLowering.h
> 
> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=149485&r1=149484&r2=149485&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Feb  1 01:56:44 2012
> @@ -1218,6 +1218,7 @@
>   setTargetDAGCombine(ISD::LOAD);
>   setTargetDAGCombine(ISD::STORE);
>   setTargetDAGCombine(ISD::ZERO_EXTEND);
> +  setTargetDAGCombine(ISD::TRUNCATE);
>   setTargetDAGCombine(ISD::SINT_TO_FP);
>   if (Subtarget->is64Bit())
>     setTargetDAGCombine(ISD::MUL);
> @@ -12911,6 +12912,104 @@
>   return EltsFromConsecutiveLoads(VT, Elts, dl, DAG);
> }
> 
> +
> +/// PerformTruncateCombine - Converts truncate operation to
> +/// a sequence of vector shuffle operations.
> +/// It is possible when we truncate 256-bit vector to 128-bit vector
> +
> +SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, 
> +                                                  DAGCombinerInfo &DCI) const {
> +  if (!DCI.isBeforeLegalizeOps())
> +    return SDValue();
> +
> +  if (!Subtarget->hasAVX()) return SDValue();
> +
> +  EVT VT = N->getValueType(0);
> +  SDValue Op = N->getOperand(0);
> +  EVT OpVT = Op.getValueType();
> +  DebugLoc dl = N->getDebugLoc();
> +
> +  if ((VT == MVT::v4i32) && (OpVT == MVT::v4i64)) {
> +
> +    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
> +                          DAG.getIntPtrConstant(0));
> +
> +    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Op,
> +                          DAG.getIntPtrConstant(2));
> +
> +    OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo);
> +    OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi);
> +
> +    // PSHUFD
> +    SmallVector<int,4> ShufMask1;
> +    ShufMask1.push_back(0);
> +    ShufMask1.push_back(2);
> +    ShufMask1.push_back(0);
> +    ShufMask1.push_back(0);
> +
> +    OpLo = DAG.getVectorShuffle(VT, dl, OpLo, DAG.getUNDEF(VT),
> +                                ShufMask1.data());
> +    OpHi = DAG.getVectorShuffle(VT, dl, OpHi, DAG.getUNDEF(VT),
> +                                ShufMask1.data());
> +
> +    // MOVLHPS
> +    SmallVector<int,4> ShufMask2;
> +    ShufMask2.push_back(0);
> +    ShufMask2.push_back(1);
> +    ShufMask2.push_back(4);
> +    ShufMask2.push_back(5);
> +
> +    return DAG.getVectorShuffle(VT, dl, OpLo, OpHi, ShufMask2.data());
> +  }
> +  if ((VT == MVT::v8i16) && (OpVT == MVT::v8i32)) {
> +
> +    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op,
> +                          DAG.getIntPtrConstant(0));
> +
> +    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i32, Op,
> +                          DAG.getIntPtrConstant(4));
> +
> +    OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLo);
> +    OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpHi);
> +
> +    // PSHUFB
> +    SmallVector<int,16> ShufMask1;
> +    ShufMask1.push_back(0x0);
> +    ShufMask1.push_back(0x1);
> +    ShufMask1.push_back(0x4);
> +    ShufMask1.push_back(0x5);
> +    ShufMask1.push_back(0x8);
> +    ShufMask1.push_back(0x9);
> +    ShufMask1.push_back(0xc);
> +    ShufMask1.push_back(0xd);
> +    for (unsigned i=0; i<8; ++i)

It's much preferred to format for loops like this:

  for (unsigned i = 0; i < 8; ++i)

Specifically, I'm referring to the whitespace, or rather the lack thereof.


> +      ShufMask1.push_back(-1);
> +
> +    OpLo = DAG.getVectorShuffle(MVT::v16i8, dl, OpLo,
> +                                DAG.getUNDEF(MVT::v16i8),
> +                                ShufMask1.data());
> +    OpHi = DAG.getVectorShuffle(MVT::v16i8, dl, OpHi,
> +                                DAG.getUNDEF(MVT::v16i8),
> +                                ShufMask1.data());
> +
> +    OpLo = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpLo);
> +    OpHi = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, OpHi);
> +
> +    // MOVLHPS
> +    SmallVector<int,4> ShufMask2;
> +    ShufMask2.push_back(0);
> +    ShufMask2.push_back(1);
> +    ShufMask2.push_back(4);
> +    ShufMask2.push_back(5);
> +
> +    SDValue res = DAG.getVectorShuffle(MVT::v4i32, dl, OpLo, OpHi, ShufMask2.data());
> +    return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, res);
> + 

Extra newline.

> +  }
> +
> +  return SDValue();
> +}
> +
> /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
> /// generation and convert it from being a bunch of shuffles and extracts
> /// to a simple store and scalar loads to extract the elements.
> @@ -14771,6 +14870,7 @@
>   case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
>   case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
>   case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG, Subtarget);
> +  case ISD::TRUNCATE:       return PerformTruncateCombine(N, DAG, DCI);
>   case X86ISD::SETCC:       return PerformSETCCCombine(N, DAG);
>   case X86ISD::SHUFP:       // Handle all target specific shuffles
>   case X86ISD::PALIGN:
> 
> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.h?rev=149485&r1=149484&r2=149485&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelLowering.h (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.h Wed Feb  1 01:56:44 2012
> @@ -839,6 +839,7 @@
>     SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;
>     SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
>     SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
> +    SDValue PerformTruncateCombine(SDNode* N, SelectionDAG &DAG, DAGCombinerInfo &DCI) const;

80-column violation?

> 
>     // Utility functions to help LowerVECTOR_SHUFFLE
>     SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;
> 
> Added: llvm/trunk/test/CodeGen/X86/avx-trunc.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-trunc.ll?rev=149485&view=auto
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/avx-trunc.ll (added)
> +++ llvm/trunk/test/CodeGen/X86/avx-trunc.ll Wed Feb  1 01:56:44 2012
> @@ -0,0 +1,15 @@
> +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
> +
> +define <4 x i32> @trunc_64_32(<4 x i64> %A) nounwind uwtable readnone ssp{
> +; CHECK: trunc_64_32
> +; CHECK: pshufd
> +  %B = trunc <4 x i64> %A to <4 x i32>
> +  ret <4 x i32>%B
> +}
> +define <8 x i16> @trunc_32_16(<8 x i32> %A) nounwind uwtable readnone ssp{
> +; CHECK: trunc_32_16
> +; CHECK: pshufb
> +  %B = trunc <8 x i32> %A to <8 x i16>
> +  ret <8 x i16>%B
> +}
> +
> 
> Propchange: llvm/trunk/test/CodeGen/X86/avx-trunc.ll
> ------------------------------------------------------------------------------
>    svn:executable = *
> 
> 
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits




More information about the llvm-commits mailing list