[llvm] r337547 - [X86][AVX] Convert X86ISD::VBROADCAST demanded elts combine to use SimplifyDemandedVectorElts

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 20 06:26:52 PDT 2018


Author: rksimon
Date: Fri Jul 20 06:26:51 2018
New Revision: 337547

URL: http://llvm.org/viewvc/llvm-project?rev=337547&view=rev
Log:
[X86][AVX] Convert X86ISD::VBROADCAST demanded elts combine to use SimplifyDemandedVectorElts

This is an early step towards using SimplifyDemandedVectorElts for target shuffle combining - for now it merely moves the existing X86ISD::VBROADCAST simplification code over to the SimplifyDemandedVectorElts mechanism.

Adds X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode to handle X86ISD::VBROADCAST - in time we can support all target shuffles (and other ops) here.
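As an aside (not part of this commit), a minimal standalone sketch of the demanded-mask reasoning the new hook performs: a broadcast only reads lane 0 of its source, so when the source is a bitcast of a narrower-element shuffle, only the Scale sub-elements that make up that lane are demanded and every other shuffle lane can be treated as undef. The element widths below (64-bit broadcast, 32-bit shuffle) are illustrative values, and -1 stands in for SM_SentinelUndef.

    #include <cstdio>
    #include <vector>

    int main() {
      const unsigned BroadcastEltBits = 64; // e.g. a v4i64 X86ISD::VBROADCAST
      const unsigned ShuffleEltBits = 32;   // e.g. a v8i32 source shuffle
      const unsigned NumShuffleElts = 8;

      // Same construction as in the new hook: only the sub-elements feeding
      // broadcast element 0 are demanded.
      const unsigned Scale = BroadcastEltBits / ShuffleEltBits; // == 2
      std::vector<int> DemandedMask(NumShuffleElts, -1);        // -1 ~ SM_SentinelUndef
      for (unsigned i = 0; i != Scale; ++i)
        DemandedMask[i] = i;

      for (int M : DemandedMask)
        std::printf("%d ", M); // prints: 0 1 -1 -1 -1 -1 -1 -1
      std::printf("\n");
    }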

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/lib/Target/X86/X86ISelLowering.h

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=337547&r1=337546&r2=337547&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Jul 20 06:26:51 2018
@@ -30635,24 +30635,13 @@ static SDValue combineTargetShuffle(SDVa
 
   switch (Opcode) {
   case X86ISD::VBROADCAST: {
-    // If broadcasting from another shuffle, attempt to simplify it.
     // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
-    SDValue Src = N.getOperand(0);
-    SDValue BC = peekThroughBitcasts(Src);
-    EVT SrcVT = Src.getValueType();
-    EVT BCVT = BC.getValueType();
-    if (isTargetShuffle(BC.getOpcode()) &&
-        VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
-      unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
-      SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
-                                        SM_SentinelUndef);
-      for (unsigned i = 0; i != Scale; ++i)
-        DemandedMask[i] = i;
-      if (SDValue Res = combineX86ShufflesRecursively(
-              {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 1,
-              /*HasVarMask*/ false, DAG, Subtarget))
-        return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
-                           DAG.getBitcast(SrcVT, Res));
+    APInt KnownUndef, KnownZero;
+    APInt DemandedMask(APInt::getAllOnesValue(VT.getVectorNumElements()));
+    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+    if (TLI.SimplifyDemandedVectorElts(N, DemandedMask, KnownUndef, KnownZero,
+                                       DCI)) {
+      return SDValue(N.getNode(), 0);
     }
     return SDValue();
   }
@@ -31298,6 +31287,41 @@ static SDValue combineShuffle(SDNode *N,
   return SDValue();
 }
 
+bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
+    SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
+    TargetLoweringOpt &TLO, unsigned Depth) const {
+
+  if (X86ISD::VBROADCAST != Op.getOpcode())
+    return false;
+
+  EVT VT = Op.getValueType();
+  SDValue Src = Op.getOperand(0);
+  SDValue BC = peekThroughBitcasts(Src);
+  EVT SrcVT = Src.getValueType();
+  EVT BCVT = BC.getValueType();
+
+  if (!isTargetShuffle(BC.getOpcode()) ||
+      (VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits()) != 0)
+    return false;
+
+  unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
+  SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
+                                    SM_SentinelUndef);
+  for (unsigned i = 0; i != Scale; ++i)
+    DemandedMask[i] = i;
+
+  if (SDValue Res = combineX86ShufflesRecursively(
+          {BC}, 0, BC, DemandedMask, {}, Depth + 1, /*HasVarMask*/ false,
+          TLO.DAG, Subtarget)) {
+    SDLoc DL(Op);
+    Res = TLO.DAG.getNode(X86ISD::VBROADCAST, DL, VT,
+                          TLO.DAG.getBitcast(SrcVT, Res));
+    return TLO.CombineTo(Op, Res);
+  }
+
+  return false;
+}
+
 /// Check if a vector extract from a target-specific shuffle of a load can be
 /// folded into a single element load.
 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.h?rev=337547&r1=337546&r2=337547&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.h (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.h Fri Jul 20 06:26:51 2018
@@ -866,6 +866,13 @@ namespace llvm {
                                              const SelectionDAG &DAG,
                                              unsigned Depth) const override;
 
+    bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op,
+                                                 const APInt &DemandedElts,
+                                                 APInt &KnownUndef,
+                                                 APInt &KnownZero,
+                                                 TargetLoweringOpt &TLO,
+                                                 unsigned Depth) const override;
+
     SDValue unwrapAddress(SDValue N) const override;
 
     bool isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
