[llvm-commits] CVS: llvm/lib/Target/X86/X86ISelLowering.cpp X86ISelLowering.h
Evan Cheng
evan.cheng at apple.com
Wed Apr 19 13:35:34 PDT 2006
Changes in directory llvm/lib/Target/X86:
X86ISelLowering.cpp updated: 1.173 -> 1.174
X86ISelLowering.h updated: 1.55 -> 1.56
---
Log message:
Commute vector_shuffle to match more movlhps, movlp{s|d} cases.
---
Diffs of the changes: (+59 -63)
X86ISelLowering.cpp | 115 +++++++++++++++++++++++++---------------------------
X86ISelLowering.h | 7 ---
2 files changed, 59 insertions(+), 63 deletions(-)
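
A note on what the commute buys here: a VECTOR_SHUFFLE mask indexes operand V1 with 0..NumElems-1 and operand V2 with NumElems..2*NumElems-1, so swapping V1 and V2 means remapping every mask element across the NumElems boundary. Below is a minimal standalone sketch of that remapping on plain integer masks (with -1 standing in for undef and a hypothetical helper name; it is not the CommuteVectorShuffle used by this patch, which operates on SDOperands):

#include <cstdio>
#include <vector>

// Remap a shuffle mask when vector_shuffle(V1, V2, Mask) is rewritten as
// vector_shuffle(V2, V1, Mask'): indices that referred to V1 now need to
// point at operand 2 (add NumElems) and vice versa (subtract NumElems).
static std::vector<int> commuteShuffleMask(const std::vector<int> &Mask) {
  unsigned NumElems = Mask.size();
  std::vector<int> Result(NumElems);
  for (unsigned i = 0; i != NumElems; ++i) {
    int Idx = Mask[i];
    if (Idx < 0)
      Result[i] = -1;                  // undef stays undef
    else if ((unsigned)Idx < NumElems)
      Result[i] = Idx + NumElems;      // was V1, now operand 2
    else
      Result[i] = Idx - NumElems;      // was V2, now operand 1
  }
  return Result;
}

int main() {
  // <2,3,6,7> on (V1, V2) -- lower half from the high half of V1, upper half
  // from the high half of V2 -- becomes <6,7,2,3> on (V2, V1), which is the
  // MOVHLPS pattern this patch now recognizes and commutes toward.
  std::vector<int> Mask = {2, 3, 6, 7};
  for (int Idx : commuteShuffleMask(Mask))
    std::printf("%d ", Idx);           // prints: 6 7 2 3
  std::printf("\n");
  return 0;
}
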
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
diff -u llvm/lib/Target/X86/X86ISelLowering.cpp:1.173 llvm/lib/Target/X86/X86ISelLowering.cpp:1.174
--- llvm/lib/Target/X86/X86ISelLowering.cpp:1.173 Mon Apr 17 17:45:49 2006
+++ llvm/lib/Target/X86/X86ISelLowering.cpp Wed Apr 19 15:35:22 2006
@@ -1555,21 +1555,6 @@
isUndefOrEqual(N->getOperand(3), 3);
}
-/// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
-bool X86::isMOVLHPSMask(SDNode *N) {
- assert(N->getOpcode() == ISD::BUILD_VECTOR);
-
- if (N->getNumOperands() != 4)
- return false;
-
- // Expect bit0 == 0, bit1 == 1, bit2 == 4, bit3 == 5
- return isUndefOrEqual(N->getOperand(0), 0) &&
- isUndefOrEqual(N->getOperand(1), 1) &&
- isUndefOrEqual(N->getOperand(2), 4) &&
- isUndefOrEqual(N->getOperand(3), 5);
-}
-
/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
bool X86::isMOVLPMask(SDNode *N) {
@@ -1591,7 +1576,8 @@
}
/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}.
+/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
+/// and MOVLHPS.
bool X86::isMOVHPMask(SDNode *N) {
assert(N->getOpcode() == ISD::BUILD_VECTOR);
@@ -1909,35 +1895,52 @@
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V2, V1, Mask);
}
+/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
+/// match movhlps. The lower half elements should come from upper half of
+/// V1 (and in order), and the upper half elements should come from the upper
+/// half of V2 (and in order).
+static bool ShouldXformToMOVHLPS(SDNode *Mask) {
+ unsigned NumElems = Mask->getNumOperands();
+ if (NumElems != 4)
+ return false;
+ for (unsigned i = 0, e = 2; i != e; ++i)
+ if (!isUndefOrEqual(Mask->getOperand(i), i+2))
+ return false;
+ for (unsigned i = 2; i != 4; ++i)
+ if (!isUndefOrEqual(Mask->getOperand(i), i+4))
+ return false;
+ return true;
+}
+
/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector.
-static inline bool isScalarLoadToVector(SDOperand Op) {
- if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR) {
- Op = Op.getOperand(0);
- return (Op.getOpcode() == ISD::LOAD);
+static inline bool isScalarLoadToVector(SDNode *N) {
+ if (N->getOpcode() == ISD::SCALAR_TO_VECTOR) {
+ N = N->getOperand(0).Val;
+ return (N->getOpcode() == ISD::LOAD);
}
return false;
}
-/// ShouldXformedToMOVLP - Return true if the node should be transformed to
-/// match movlp{d|s}. The lower half elements should come from V1 (and in
-/// order), and the upper half elements should come from the upper half of
-/// V2 (not necessarily in order). And since V1 will become the source of
-/// the MOVLP, it must be a scalar load.
-static bool ShouldXformedToMOVLP(SDOperand V1, SDOperand V2, SDOperand Mask) {
- if (isScalarLoadToVector(V1)) {
- unsigned NumElems = Mask.getNumOperands();
- for (unsigned i = 0, e = NumElems/2; i != e; ++i)
- if (!isUndefOrEqual(Mask.getOperand(i), i))
- return false;
- for (unsigned i = NumElems/2; i != NumElems; ++i)
- if (!isUndefOrInRange(Mask.getOperand(i),
- NumElems+NumElems/2, NumElems*2))
- return false;
- return true;
- }
+/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
+/// match movlp{s|d}. The lower half elements should come from lower half of
+/// V1 (and in order), and the upper half elements should come from the upper
+/// half of V2 (and in order). And since V1 will become the source of the
+/// MOVLP, it must be either a vector load or a scalar load to vector.
+static bool ShouldXformToMOVLP(SDNode *V1, SDNode *Mask) {
+ if (V1->getOpcode() != ISD::LOAD && !isScalarLoadToVector(V1))
+ return false;
- return false;
+ unsigned NumElems = Mask->getNumOperands();
+ if (NumElems != 2 && NumElems != 4)
+ return false;
+ for (unsigned i = 0, e = NumElems/2; i != e; ++i)
+ if (!isUndefOrEqual(Mask->getOperand(i), i))
+ return false;
+ for (unsigned i = NumElems/2; i != NumElems; ++i)
+ if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
+ return false;
+ return true;
}
/// isLowerFromV2UpperFromV1 - Returns true if the shuffle mask is except
@@ -2806,29 +2809,16 @@
return PromoteSplat(Op, DAG);
}
- // Normalize the node to match x86 shuffle ops if needed
- if (V2.getOpcode() != ISD::UNDEF) {
- bool DoSwap = false;
-
- if (ShouldXformedToMOVLP(V1, V2, PermMask))
- DoSwap = true;
- else if (isLowerFromV2UpperFromV1(PermMask))
- DoSwap = true;
-
- if (DoSwap) {
- Op = CommuteVectorShuffle(Op, DAG);
- V1 = Op.getOperand(0);
- V2 = Op.getOperand(1);
- PermMask = Op.getOperand(2);
- }
- }
-
- if (NumElems == 2)
- return Op;
+ if (ShouldXformToMOVHLPS(PermMask.Val) ||
+ ShouldXformToMOVLP(V1.Val, PermMask.Val))
+ return CommuteVectorShuffle(Op, DAG);
if (X86::isMOVSMask(PermMask.Val) ||
X86::isMOVSHDUPMask(PermMask.Val) ||
- X86::isMOVSLDUPMask(PermMask.Val))
+ X86::isMOVSLDUPMask(PermMask.Val) ||
+ X86::isMOVHLPSMask(PermMask.Val) ||
+ X86::isMOVHPMask(PermMask.Val) ||
+ X86::isMOVLPMask(PermMask.Val))
return Op;
if (X86::isUNPCKLMask(PermMask.Val) ||
@@ -2837,6 +2827,15 @@
// Leave the VECTOR_SHUFFLE alone. It matches {P}UNPCKL*.
return Op;
+ // Normalize the node to match x86 shuffle ops if needed
+ if (V2.getOpcode() != ISD::UNDEF)
+ if (isLowerFromV2UpperFromV1(PermMask)) {
+ Op = CommuteVectorShuffle(Op, DAG);
+ V1 = Op.getOperand(0);
+ V2 = Op.getOperand(1);
+ PermMask = Op.getOperand(2);
+ }
+
// If VT is integer, try PSHUF* first, then SHUFP*.
if (MVT::isInteger(VT)) {
if (X86::isPSHUFDMask(PermMask.Val) ||
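
For reference, the two mask shapes that the new ShouldXformToMOVHLPS and ShouldXformToMOVLP predicates look for, restated as a sketch on plain 4-element integer masks (hypothetical helper names, -1 for undef; the requirement that V1 be a vector load or scalar-load-to-vector in ShouldXformToMOVLP is not modeled here):

#include <cassert>

// Undef mask elements are represented as -1 in this sketch.
static bool isUndefOrEqualTo(int MaskElt, int Val) {
  return MaskElt < 0 || MaskElt == Val;
}

// Mirrors the shape tested by ShouldXformToMOVHLPS above: lower half taken
// in order from the upper half of V1 (indices 2,3) and upper half taken in
// order from the upper half of V2 (indices 6,7).
static bool wantsMOVHLPSAfterCommute(const int Mask[4]) {
  return isUndefOrEqualTo(Mask[0], 2) && isUndefOrEqualTo(Mask[1], 3) &&
         isUndefOrEqualTo(Mask[2], 6) && isUndefOrEqualTo(Mask[3], 7);
}

// Mirrors the shape tested by ShouldXformToMOVLP for NumElems == 4: lower
// half in order from V1 (indices 0,1), upper half in order from V2 (6,7).
static bool wantsMOVLPAfterCommute(const int Mask[4]) {
  return isUndefOrEqualTo(Mask[0], 0) && isUndefOrEqualTo(Mask[1], 1) &&
         isUndefOrEqualTo(Mask[2], 6) && isUndefOrEqualTo(Mask[3], 7);
}

int main() {
  const int HLPS[4] = {2, 3, 6, 7};   // commutes to <6,7,2,3>, i.e. MOVHLPS
  const int LP[4]   = {0, -1, 6, 7};  // an undef element is still accepted
  assert(wantsMOVHLPSAfterCommute(HLPS));
  assert(wantsMOVLPAfterCommute(LP));
  assert(!wantsMOVHLPSAfterCommute(LP));
  return 0;
}
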
Index: llvm/lib/Target/X86/X86ISelLowering.h
diff -u llvm/lib/Target/X86/X86ISelLowering.h:1.55 llvm/lib/Target/X86/X86ISelLowering.h:1.56
--- llvm/lib/Target/X86/X86ISelLowering.h:1.55 Fri Apr 14 16:59:03 2006
+++ llvm/lib/Target/X86/X86ISelLowering.h Wed Apr 19 15:35:22 2006
@@ -204,10 +204,6 @@
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
bool isSHUFPMask(SDNode *N);
- /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
- bool isMOVLHPSMask(SDNode *N);
-
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool isMOVHLPSMask(SDNode *N);
@@ -217,7 +213,8 @@
bool isMOVLPMask(SDNode *N);
/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}.
+ /// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
+ /// as well as MOVLHPS.
bool isMOVHPMask(SDNode *N);
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
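
On the header comment change: per the removed isMOVLHPSMask body in the .cpp diff above, MOVLHPS corresponds to the mask <0,1,4,5> (low half from V1 in order, high half from the low half of V2), which is the same shape isMOVHPMask already accepts for MOVHP{S|D}; that is why the separate predicate can be dropped. A tiny illustration on plain integer masks (hypothetical name, -1 for undef):

#include <cassert>

// The mask shape shared by MOVLHPS and MOVHP{S|D} for 4 elements:
// <0, 1, 4, 5> -- elements 4 and 5 are the low half of the second operand.
static bool isMOVLHPSOrMOVHPShape(const int Mask[4]) {
  auto Ok = [](int M, int V) { return M < 0 || M == V; };
  return Ok(Mask[0], 0) && Ok(Mask[1], 1) && Ok(Mask[2], 4) && Ok(Mask[3], 5);
}

int main() {
  const int M[4] = {0, 1, 4, -1};     // one undef element is still accepted
  assert(isMOVLHPSOrMOVHPShape(M));
  return 0;
}
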