[llvm-commits] CVS: llvm/lib/Target/X86/X86ISelLowering.cpp X86ISelLowering.h X86InstrSSE.td

Evan Cheng evan.cheng at apple.com
Tue Mar 21 18:53:12 PST 2006



Changes in directory llvm/lib/Target/X86:

X86ISelLowering.cpp updated: 1.115 -> 1.116
X86ISelLowering.h updated: 1.34 -> 1.35
X86InstrSSE.td updated: 1.12 -> 1.13
---
Log message:

Some splat and shuffle support.


---
Diffs of the changes:  (+157 -8)

 X86ISelLowering.cpp |   85 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 X86ISelLowering.h   |   18 +++++++++++
 X86InstrSSE.td      |   62 +++++++++++++++++++++++++++++++++----
 3 files changed, 157 insertions(+), 8 deletions(-)


Index: llvm/lib/Target/X86/X86ISelLowering.cpp
diff -u llvm/lib/Target/X86/X86ISelLowering.cpp:1.115 llvm/lib/Target/X86/X86ISelLowering.cpp:1.116
--- llvm/lib/Target/X86/X86ISelLowering.cpp:1.115	Tue Mar 21 17:01:21 2006
+++ llvm/lib/Target/X86/X86ISelLowering.cpp	Tue Mar 21 20:53:00 2006
@@ -278,6 +278,7 @@
     setOperationAction(ISD::MUL,              MVT::v4f32, Legal);
     setOperationAction(ISD::LOAD,             MVT::v4f32, Legal);
     setOperationAction(ISD::BUILD_VECTOR,     MVT::v4f32, Expand);
+    setOperationAction(ISD::VECTOR_SHUFFLE,   MVT::v4f32, Custom);
   }
 
   if (TM.getSubtarget<X86Subtarget>().hasSSE2()) {
@@ -299,6 +300,7 @@
     setOperationAction(ISD::BUILD_VECTOR,     MVT::v2i64, Expand);
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,   MVT::v2f64, Custom);
   }
 
   computeRegisterProperties();
@@ -1366,6 +1368,66 @@
           (GV->isExternal() && !GV->hasNotBeenReadFromBytecode()));
 }
 
+/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
+/// specifies a shuffle of elements that is suitable for input to PSHUFD.
+bool X86::isPSHUFDMask(SDNode *N) {
+  assert(N->getOpcode() == ISD::BUILD_VECTOR);
+
+  if (N->getNumOperands() != 4)
+    return false;
+
+  // Check that each mask element references only the first vector (i.e. is
+  // less than 4), so the permute is expressible as a PSHUFD immediate.
+  SDOperand Elt = N->getOperand(0);
+  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
+  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) {
+    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
+           "Invalid VECTOR_SHUFFLE mask!");
+    if (cast<ConstantSDNode>(N->getOperand(i))->getValue() >= 4) return false;
+  }
+
+  return true;
+}
+
+/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
+/// a splat of a single element.
+bool X86::isSplatMask(SDNode *N) {
+  assert(N->getOpcode() == ISD::BUILD_VECTOR);
+
+  // We can only splat 64-bit, and 32-bit quantities.
+  if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
+    return false;
+
+  // This is a splat operation if each element of the permute is the same, and
+  // if the value doesn't reference the second vector.
+  SDOperand Elt = N->getOperand(0);
+  assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
+  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) {
+    assert(isa<ConstantSDNode>(N->getOperand(i)) &&
+           "Invalid VECTOR_SHUFFLE mask!");
+    if (N->getOperand(i) != Elt) return false;
+  }
+
+  // Make sure it is a splat of the first vector operand.
+  return cast<ConstantSDNode>(Elt)->getValue() < N->getNumOperands();
+}
+
+/// getShuffleImmediate - Return the immediate that encodes the specified
+/// VECTOR_SHUFFLE mask (one accepted by isPSHUFDMask or isSplatMask).
+unsigned X86::getShuffleImmediate(SDNode *N) {
+  unsigned NumOperands = N->getNumOperands();
+  unsigned Shift = (NumOperands == 4) ? 2 : 1;
+  unsigned Mask = 0;
+  unsigned i = NumOperands - 1;
+  do {
+    Mask |= cast<ConstantSDNode>(N->getOperand(i))->getValue();
+    Mask <<= Shift;
+    --i;
+  } while (i != 0);
+
+  return Mask;
+}
+
 /// LowerOperation - Provide custom lowering hooks for some operations.
 ///
 SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
@@ -2141,6 +2203,28 @@
     SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
     return DAG.getNode(X86ISD::SCALAR_TO_VECTOR, Op.getValueType(), AnyExt);
   }
+  case ISD::VECTOR_SHUFFLE: {
+    SDOperand V1 = Op.getOperand(0);
+    SDOperand V2 = Op.getOperand(1);
+    SDOperand PermMask = Op.getOperand(2);
+    MVT::ValueType VT = Op.getValueType();
+
+    if (V2.getOpcode() == ISD::UNDEF) {
+      // Handle splat cases.
+      if (X86::isSplatMask(PermMask.Val)) {
+        if (VT == MVT::v2f64 || VT == MVT::v2i64)
+          // Use unpcklpd
+          return DAG.getNode(X86ISD::UNPCKLP, VT, V1, V1);
+        // Leave the VECTOR_SHUFFLE alone. It matches SHUFP*.
+        break;
+      } else if (VT == MVT::v4f32 && X86::isPSHUFDMask(PermMask.Val))
+        // Leave the VECTOR_SHUFFLE alone. It matches PSHUFD.
+        break;
+    }
+
+    // TODO.
+    assert(0);
+  }
   }
 }
 
@@ -2175,6 +2259,7 @@
   case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
   case X86ISD::Wrapper:            return "X86ISD::Wrapper";
   case X86ISD::SCALAR_TO_VECTOR:   return "X86ISD::SCALAR_TO_VECTOR";
+  case X86ISD::UNPCKLP:            return "X86ISD::UNPCKLP";
   }
 }
 


Index: llvm/lib/Target/X86/X86ISelLowering.h
diff -u llvm/lib/Target/X86/X86ISelLowering.h:1.34 llvm/lib/Target/X86/X86ISelLowering.h:1.35
--- llvm/lib/Target/X86/X86ISelLowering.h:1.34	Tue Mar 21 17:01:21 2006
+++ llvm/lib/Target/X86/X86ISelLowering.h	Tue Mar 21 20:53:00 2006
@@ -149,6 +149,9 @@
       /// SCALAR_TO_VECTOR - X86 version of SCALAR_TO_VECTOR. The destination base
       /// type does not have to match the operand type.
       SCALAR_TO_VECTOR,
+
+      /// UNPCKLP - X86 unpack and interleave low instructions.
+      UNPCKLP,
     };
 
     // X86 specific condition code. These correspond to X86_*_COND in
@@ -174,6 +177,21 @@
     };
   }
 
+ /// Define some predicates that are used for node matching.
+ namespace X86 {
+   /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
+   /// specifies a shuffle of elements that is suitable for input to PSHUFD.
+   bool isPSHUFDMask(SDNode *N);
+
+   /// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
+   /// specifies a splat of a single element.
+   bool isSplatMask(SDNode *N);
+
+   /// getShuffleImmediate - Return the immediate that encodes the specified
+   /// VECTOR_SHUFFLE mask (one accepted by isPSHUFDMask or isSplatMask).
+   unsigned getShuffleImmediate(SDNode *N);
+ }
+
   //===----------------------------------------------------------------------===//
   //  X86TargetLowering - X86 Implementation of the TargetLowering interface
   class X86TargetLowering : public TargetLowering {


Index: llvm/lib/Target/X86/X86InstrSSE.td
diff -u llvm/lib/Target/X86/X86InstrSSE.td:1.12 llvm/lib/Target/X86/X86InstrSSE.td:1.13
--- llvm/lib/Target/X86/X86InstrSSE.td:1.12	Tue Mar 21 17:01:21 2006
+++ llvm/lib/Target/X86/X86InstrSSE.td	Tue Mar 21 20:53:00 2006
@@ -17,14 +17,19 @@
 // SSE specific DAG Nodes.
 //===----------------------------------------------------------------------===//
 
-def X86loadp : SDNode<"X86ISD::LOAD_PACK", SDTLoad, 
-                      [SDNPHasChain]>;
-def X86fand  : SDNode<"X86ISD::FAND",     SDTFPBinOp,
-                      [SDNPCommutative, SDNPAssociative]>;
-def X86fxor  : SDNode<"X86ISD::FXOR",     SDTFPBinOp,
-                      [SDNPCommutative, SDNPAssociative]>;
-def X86s2vec : SDNode<"X86ISD::SCALAR_TO_VECTOR",
-                      SDTypeProfile<1, 1, []>, []>;
+def SDTX86Unpcklp : SDTypeProfile<1, 2,
+                                  [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>]>;
+
+def X86loadp   : SDNode<"X86ISD::LOAD_PACK", SDTLoad, 
+                        [SDNPHasChain]>;
+def X86fand    : SDNode<"X86ISD::FAND",     SDTFPBinOp,
+                        [SDNPCommutative, SDNPAssociative]>;
+def X86fxor    : SDNode<"X86ISD::FXOR",     SDTFPBinOp,
+                        [SDNPCommutative, SDNPAssociative]>;
+def X86s2vec   : SDNode<"X86ISD::SCALAR_TO_VECTOR",
+                        SDTypeProfile<1, 1, []>, []>;
+def X86unpcklp : SDNode<"X86ISD::UNPCKLP",
+                        SDTX86Unpcklp, []>;
 
 //===----------------------------------------------------------------------===//
 // SSE pattern fragments
@@ -36,6 +41,20 @@
 def loadv4f32    : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
 def loadv2f64    : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
 
+// SHUFFLE_get_imm xform function: convert vector_shuffle mask to PSHUF*,
+// SHUF* etc. imm.
+def SHUFFLE_get_imm : SDNodeXForm<build_vector, [{
+  return getI8Imm(X86::getShuffleImmediate(N));
+}]>;
+
+def SHUFFLE_splat_mask : PatLeaf<(build_vector), [{
+  return X86::isSplatMask(N);
+}], SHUFFLE_get_imm>;
+
+def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
+  return X86::isPSHUFDMask(N);
+}], SHUFFLE_get_imm>;
+
 //===----------------------------------------------------------------------===//
 // SSE scalar FP Instructions
 //===----------------------------------------------------------------------===//
@@ -652,6 +671,21 @@
 }
 
 // Shuffle and unpack instructions
+def PSHUFWrr : PSI<0x70, AddRegFrm,
+                   (ops VR64:$dst, VR64:$src1, i8imm:$src2),
+                   "pshufw {$src2, $src1, $dst|$dst, $src1, $src2}", []>;
+def PSHUFWrm : PSI<0x70, MRMSrcMem,
+                   (ops VR64:$dst, i64mem:$src1, i8imm:$src2),
+                   "pshufw {$src2, $src1, $dst|$dst, $src1, $src2}", []>;
+def PSHUFDrr : PDI<0x70, AddRegFrm,
+                   (ops VR128:$dst, VR128:$src1, i8imm:$src2),
+                   "pshufd {$src2, $src1, $dst|$dst, $src1, $src2}",
+                 [(set VR128:$dst, (vector_shuffle (v4f32 VR128:$src1), (undef),
+                                    PSHUFD_shuffle_mask:$src2))]>;
+def PSHUFDrm : PDI<0x70, MRMSrcMem,
+                   (ops VR128:$dst, i128mem:$src1, i8imm:$src2),
+                   "pshufd {$src2, $src1, $dst|$dst, $src1, $src2}", []>;
+
 def SHUFPSrr : PSI<0xC6, MRMSrcReg, 
                    (ops VR128:$dst, VR128:$src1, VR128:$src2, i8imm:$src3),
                    "shufps {$src3, $src2, $dst|$dst, $src2, $src3}", []>;
@@ -755,3 +789,15 @@
 def : Pat<(v16i8 (X86s2vec R32:$src)), (MOVD128rr R32:$src)>,
   Requires<[HasSSE2]>;
 
+
+// Splat v4f32 / v4i32
+def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), SHUFFLE_splat_mask:$sm),
+          (v4f32 (SHUFPSrr VR128:$src, VR128:$src, SHUFFLE_splat_mask:$sm))>;
+def : Pat<(vector_shuffle (v4i32 VR128:$src), (undef), SHUFFLE_splat_mask:$sm),
+          (v4i32 (SHUFPSrr VR128:$src, VR128:$src, SHUFFLE_splat_mask:$sm))>;
+
+// Splat v2f64 / v2i64
+def : Pat<(X86unpcklp (v2f64 VR128:$src1), VR128:$src2),
+          (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>;
+def : Pat<(X86unpcklp (v2i64 VR128:$src1), VR128:$src2),
+          (v2i64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>;






More information about the llvm-commits mailing list