[llvm-commits] CVS: llvm/lib/Target/X86/X86ISelLowering.cpp X86ISelLowering.h X86InstrSSE.td
Evan Cheng
evan.cheng at apple.com
Mon Mar 27 18:43:38 PST 2006
Changes in directory llvm/lib/Target/X86:
X86ISelLowering.cpp updated: 1.136 -> 1.137
X86ISelLowering.h updated: 1.45 -> 1.46
X86InstrSSE.td updated: 1.35 -> 1.36
---
Log message:
- Clean up / consolidate various shuffle masks (a standalone sketch of the consolidated mask check follows the diffstat below).
- Some misc. bug fixes.
- Use MOVHPDrm to load from m64 to the upper half of an XMM register (see the intrinsics example at the end of this message).
---
Diffs of the changes: (+124 -96)
X86ISelLowering.cpp | 58 ++++++++------------
X86ISelLowering.h | 13 +---
X86InstrSSE.td | 149 +++++++++++++++++++++++++++++++++-------------------
3 files changed, 124 insertions(+), 96 deletions(-)
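For reference, the consolidated predicates boil down to a simple index pattern: for a 4-element vector, an UNPCKL mask is <0, 4, 1, 5> (interleave the low halves of the two sources) and an UNPCKH mask is <2, 6, 3, 7> (interleave the high halves). The standalone C++ sketch below mirrors the new loops on plain index vectors instead of SDNodes; the helper names are made up for illustration only and are not part of this patch.

#include <cstdio>
#include <vector>

// Illustrative sketch only -- not the LLVM code. An element index < N refers
// to the first source vector, an index >= N refers to the second one.
static bool isUnpackLowMask(const std::vector<unsigned> &M) {
  unsigned N = unsigned(M.size());
  if (N != 2 && N != 4 && N != 8 && N != 16)
    return false;
  // Same loop shape as the new X86::isUNPCKLMask: expect <j, j+N> pairs.
  for (unsigned i = 0, j = 0; i != N; i += 2, ++j)
    if (M[i] != j || M[i+1] != j + N)
      return false;
  return true;
}

static bool isUnpackHighMask(const std::vector<unsigned> &M) {
  unsigned N = unsigned(M.size());
  if (N != 2 && N != 4 && N != 8 && N != 16)
    return false;
  // Same as above but starting from the high half: expect <j+N/2, j+N/2+N>.
  for (unsigned i = 0, j = 0; i != N; i += 2, ++j)
    if (M[i] != j + N/2 || M[i+1] != j + N/2 + N)
      return false;
  return true;
}

int main() {
  // v4f32: UNPCKLPS mask is <0,4,1,5>, UNPCKHPS mask is <2,6,3,7>.
  std::printf("%d %d\n", isUnpackLowMask({0, 4, 1, 5}) ? 1 : 0,
                         isUnpackHighMask({2, 6, 3, 7}) ? 1 : 0);   // "1 1"
  // The old MOVLHPS/UNPCKLPD case <0, 2> is now just the 2-element UNPCKL case.
  std::printf("%d\n", isUnpackLowMask({0, 2}) ? 1 : 0);             // "1"
  return 0;
}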
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
diff -u llvm/lib/Target/X86/X86ISelLowering.cpp:1.136 llvm/lib/Target/X86/X86ISelLowering.cpp:1.137
--- llvm/lib/Target/X86/X86ISelLowering.cpp:1.136 Mon Mar 27 18:39:58 2006
+++ llvm/lib/Target/X86/X86ISelLowering.cpp Mon Mar 27 20:43:26 2006
@@ -1451,24 +1451,6 @@
return true;
}
-/// isMOVLHPSorUNPCKLPDMask - Return true if the specified VECTOR_SHUFFLE
-/// operand specifies a shuffle of elements that is suitable for input to
-/// MOVLHPS or UNPCKLPD.
-bool X86::isMOVLHPSorUNPCKLPDMask(SDNode *N) {
- assert(N->getOpcode() == ISD::BUILD_VECTOR);
-
- if (N->getNumOperands() != 2)
- return false;
-
- // Expect bit 0 == 0, bit1 == 2
- SDOperand Bit0 = N->getOperand(0);
- SDOperand Bit1 = N->getOperand(1);
- assert(isa<ConstantSDNode>(Bit0) && isa<ConstantSDNode>(Bit1) &&
- "Invalid VECTOR_SHUFFLE mask!");
- return (cast<ConstantSDNode>(Bit0)->getValue() == 0 &&
- cast<ConstantSDNode>(Bit1)->getValue() == 2);
-}
-
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool X86::isMOVHLPSMask(SDNode *N) {
@@ -1477,7 +1459,7 @@
if (N->getNumOperands() != 2)
return false;
- // Expect bit 0 == 0, bit1 == 3
+ // Expect bit 0 == 1, bit1 == 1
SDOperand Bit0 = N->getOperand(0);
SDOperand Bit1 = N->getOperand(1);
assert(isa<ConstantSDNode>(Bit0) && isa<ConstantSDNode>(Bit1) &&
@@ -1486,26 +1468,32 @@
cast<ConstantSDNode>(Bit1)->getValue() == 3);
}
-/// isUNPCKHPDMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to UNPCKHPD.
-bool X86::isUNPCKHPDMask(SDNode *N) {
+/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
+/// specifies a shuffle of elements that is suitable for input to UNPCKL.
+bool X86::isUNPCKLMask(SDNode *N) {
assert(N->getOpcode() == ISD::BUILD_VECTOR);
- if (N->getNumOperands() != 2)
+ unsigned NumElems = N->getNumOperands();
+ if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
return false;
- // Expect bit 0 == 1, bit1 == 3
- SDOperand Bit0 = N->getOperand(0);
- SDOperand Bit1 = N->getOperand(1);
- assert(isa<ConstantSDNode>(Bit0) && isa<ConstantSDNode>(Bit1) &&
- "Invalid VECTOR_SHUFFLE mask!");
- return (cast<ConstantSDNode>(Bit0)->getValue() == 1 &&
- cast<ConstantSDNode>(Bit1)->getValue() == 3);
+ for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
+ SDOperand BitI = N->getOperand(i);
+ SDOperand BitI1 = N->getOperand(i+1);
+ assert(isa<ConstantSDNode>(BitI) && isa<ConstantSDNode>(BitI1) &&
+ "Invalid VECTOR_SHUFFLE mask!");
+ if (cast<ConstantSDNode>(BitI)->getValue() != j)
+ return false;
+ if (cast<ConstantSDNode>(BitI1)->getValue() != j + NumElems)
+ return false;
+ }
+
+ return true;
}
-/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
-/// specifies a shuffle of elements that is suitable for input to UNPCKL.
-bool X86::isUNPCKLMask(SDNode *N) {
+/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
+/// specifies a shuffle of elements that is suitable for input to UNPCKH.
+bool X86::isUNPCKHMask(SDNode *N) {
assert(N->getOpcode() == ISD::BUILD_VECTOR);
unsigned NumElems = N->getNumOperands();
@@ -1517,9 +1505,9 @@
SDOperand BitI1 = N->getOperand(i+1);
assert(isa<ConstantSDNode>(BitI) && isa<ConstantSDNode>(BitI1) &&
"Invalid VECTOR_SHUFFLE mask!");
- if (cast<ConstantSDNode>(BitI)->getValue() != j)
+ if (cast<ConstantSDNode>(BitI)->getValue() != j + NumElems/2)
return false;
- if (cast<ConstantSDNode>(BitI1)->getValue() != j + NumElems)
+ if (cast<ConstantSDNode>(BitI1)->getValue() != j + NumElems/2 + NumElems)
return false;
}
Index: llvm/lib/Target/X86/X86ISelLowering.h
diff -u llvm/lib/Target/X86/X86ISelLowering.h:1.45 llvm/lib/Target/X86/X86ISelLowering.h:1.46
--- llvm/lib/Target/X86/X86ISelLowering.h:1.45 Mon Mar 27 18:39:58 2006
+++ llvm/lib/Target/X86/X86ISelLowering.h Mon Mar 27 20:43:26 2006
@@ -188,23 +188,18 @@
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
bool isSHUFPMask(SDNode *N);
- /// isMOVLHPSorUNPCKLPDMask - Return true if the specified VECTOR_SHUFFLE
- /// operand specifies a shuffle of elements that is suitable for input to
- /// MOVLHPS or UNPCKLPD.
- bool isMOVLHPSorUNPCKLPDMask(SDNode *N);
-
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
bool isMOVHLPSMask(SDNode *N);
- /// isUNPCKHPDMask - Return true if the specified VECTOR_SHUFFLE operand
- /// specifies a shuffle of elements that is suitable for input to UNPCKHPD.
- bool isUNPCKHPDMask(SDNode *N);
-
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
bool isUNPCKLMask(SDNode *N);
+ /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
+ /// specifies a shuffle of elements that is suitable for input to UNPCKH.
+ bool isUNPCKHMask(SDNode *N);
+
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element.
bool isSplatMask(SDNode *N);
Index: llvm/lib/Target/X86/X86InstrSSE.td
diff -u llvm/lib/Target/X86/X86InstrSSE.td:1.35 llvm/lib/Target/X86/X86InstrSSE.td:1.36
--- llvm/lib/Target/X86/X86InstrSSE.td:1.35 Mon Mar 27 18:39:58 2006
+++ llvm/lib/Target/X86/X86InstrSSE.td Mon Mar 27 20:43:26 2006
@@ -63,22 +63,18 @@
return X86::isSplatMask(N);
}]>;
-def MOVLHPSorUNPCKLPD_shuffle_mask : PatLeaf<(build_vector), [{
- return X86::isMOVLHPSorUNPCKLPDMask(N);
-}], SHUFFLE_get_shuf_imm>;
-
def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isMOVHLPSMask(N);
-}], SHUFFLE_get_shuf_imm>;
-
-def UNPCKHPD_shuffle_mask : PatLeaf<(build_vector), [{
- return X86::isUNPCKHPDMask(N);
-}], SHUFFLE_get_shuf_imm>;
+}]>;
def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
return X86::isUNPCKLMask(N);
}]>;
+def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
+ return X86::isUNPCKHMask(N);
+}]>;
+
// Only use PSHUF if it is not a splat.
def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
return !X86::isSplatMask(N) && X86::isPSHUFDMask(N);
@@ -172,7 +168,7 @@
def MOVSD128rm : SDI<0x10, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
"movsd {$src, $dst|$dst, $src}",
[(set VR128:$dst,
- (v4f32 (scalar_to_vector (loadf64 addr:$src))))]>;
+ (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;
// Conversion instructions
@@ -476,21 +472,34 @@
def MOVLPDmr : PDI<0x13, MRMDestMem, (ops f64mem:$dst, VR128:$src),
"movlpd {$src, $dst|$dst, $src}", []>;
-def MOVHPSrm : PSI<0x16, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
- "movhps {$src, $dst|$dst, $src}", []>;
+let isTwoAddress = 1 in {
+def MOVHPSrm : PSI<0x16, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
+ "movhps {$src2, $dst|$dst, $src2}", []>;
+def MOVHPDrm : PDI<0x16, MRMSrcMem, (ops VR128:$dst, VR128:$src1, f64mem:$src2),
+ "movhpd {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2f64 (vector_shuffle VR128:$src1,
+ (scalar_to_vector (loadf64 addr:$src2)),
+ UNPCKL_shuffle_mask)))]>;
+}
+
def MOVHPSmr : PSI<0x17, MRMDestMem, (ops f64mem:$dst, VR128:$src),
"movhps {$src, $dst|$dst, $src}", []>;
-def MOVHPDrm : PDI<0x16, MRMSrcMem, (ops VR128:$dst, f64mem:$src),
- "movhpd {$src, $dst|$dst, $src}", []>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (ops f64mem:$dst, VR128:$src),
"movhpd {$src, $dst|$dst, $src}", []>;
let isTwoAddress = 1 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
- "movlhps {$src2, $dst|$dst, $src2}", []>;
+ "movlhps {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
+ UNPCKL_shuffle_mask)))]>;
def MOVHLPSrr : PSI<0x12, MRMSrcReg, (ops VR128:$dst, VR128:$src1, VR128:$src2),
- "movlhps {$src2, $dst|$dst, $src2}", []>;
+ "movlhps {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
+ MOVHLPS_shuffle_mask)))]>;
}
def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (ops R32:$dst, VR128:$src),
@@ -784,16 +793,29 @@
def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
- "unpckhps {$src2, $dst|$dst, $src2}", []>;
+ "unpckhps {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
+ UNPCKH_shuffle_mask)))]>;
def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f128mem:$src2),
- "unpckhps {$src2, $dst|$dst, $src2}", []>;
+ "unpckhps {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (vector_shuffle VR128:$src1, (load addr:$src2),
+ UNPCKH_shuffle_mask)))]>;
def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
- "unpckhpd {$src2, $dst|$dst, $src2}", []>;
+ "unpckhpd {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
+ UNPCKH_shuffle_mask)))]>;
def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f128mem:$src2),
- "unpckhpd {$src2, $dst|$dst, $src2}", []>;
+ "unpckhpd {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2f64 (vector_shuffle VR128:$src1, (load addr:$src2),
+ UNPCKH_shuffle_mask)))]>;
+
def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
"unpcklps {$src2, $dst|$dst, $src2}",
@@ -808,10 +830,16 @@
UNPCKL_shuffle_mask)))]>;
def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
- "unpcklpd {$src2, $dst|$dst, $src2}", []>;
+ "unpcklpd {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
+ UNPCKL_shuffle_mask)))]>;
def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, f128mem:$src2),
- "unpcklpd {$src2, $dst|$dst, $src2}", []>;
+ "unpcklpd {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2f64 (vector_shuffle VR128:$src1, (load addr:$src2),
+ UNPCKL_shuffle_mask)))]>;
}
//===----------------------------------------------------------------------===//
@@ -940,35 +968,65 @@
UNPCKL_shuffle_mask)))]>;
def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
- "punpcklqdq {$src2, $dst|$dst, $src2}", []>;
+ "punpcklqdq {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
+ UNPCKL_shuffle_mask)))]>;
def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
- "punpcklqdq {$src2, $dst|$dst, $src2}", []>;
+ "punpcklqdq {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2i64 (vector_shuffle VR128:$src1, (load addr:$src2),
+ UNPCKL_shuffle_mask)))]>;
def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
- "punpckhbw {$src2, $dst|$dst, $src2}", []>;
+ "punpckhbw {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
+ UNPCKH_shuffle_mask)))]>;
def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
- "punpckhbw {$src2, $dst|$dst, $src2}", []>;
+ "punpckhbw {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v16i8 (vector_shuffle VR128:$src1, (load addr:$src2),
+ UNPCKH_shuffle_mask)))]>;
def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
- "punpckhwd {$src2, $dst|$dst, $src2}", []>;
+ "punpckhwd {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
+ UNPCKH_shuffle_mask)))]>;
def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
- "punpckhwd {$src2, $dst|$dst, $src2}", []>;
+ "punpckhwd {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v8i16 (vector_shuffle VR128:$src1, (load addr:$src2),
+ UNPCKH_shuffle_mask)))]>;
def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
- "punpckhdq {$src2, $dst|$dst, $src2}", []>;
+ "punpckhdq {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
+ UNPCKH_shuffle_mask)))]>;
def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
- "punpckhdq {$src2, $dst|$dst, $src2}", []>;
+ "punpckhdq {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v4i32 (vector_shuffle VR128:$src1, (load addr:$src2),
+ UNPCKH_shuffle_mask)))]>;
def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
(ops VR128:$dst, VR128:$src1, VR128:$src2),
- "punpckhdq {$src2, $dst|$dst, $src2}", []>;
+ "punpckhdq {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
+ UNPCKH_shuffle_mask)))]>;
def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
(ops VR128:$dst, VR128:$src1, i128mem:$src2),
- "punpckhqdq {$src2, $dst|$dst, $src2}", []>;
+ "punpckhqdq {$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v2i64 (vector_shuffle VR128:$src1, (load addr:$src2),
+ UNPCKH_shuffle_mask)))]>;
}
//===----------------------------------------------------------------------===//
@@ -1147,29 +1205,16 @@
(v4i32 (PSHUFDrr VR128:$src, PSHUFD_shuffle_mask:$sm))>,
Requires<[HasSSE2]>;
-// Shuffle v2f64 / v2i64
-def : Pat<(vector_shuffle (v2f64 VR128:$src1), (v2f64 VR128:$src2),
- MOVLHPSorUNPCKLPD_shuffle_mask:$sm),
- (v2f64 (MOVLHPSrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
-def : Pat<(vector_shuffle (v2f64 VR128:$src1), (v2f64 VR128:$src2),
- MOVHLPS_shuffle_mask:$sm),
- (v2f64 (MOVHLPSrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
-def : Pat<(vector_shuffle (v2f64 VR128:$src1), (v2f64 VR128:$src2),
- UNPCKHPD_shuffle_mask:$sm),
- (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
-def : Pat<(vector_shuffle (v2f64 VR128:$src1), (loadv2f64 addr:$src2),
- MOVLHPSorUNPCKLPD_shuffle_mask:$sm),
- (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
-
+// Shuffle v2i64
def : Pat<(vector_shuffle (v2i64 VR128:$src1), (v2i64 VR128:$src2),
- MOVLHPSorUNPCKLPD_shuffle_mask:$sm),
+ UNPCKL_shuffle_mask:$sm),
(v2i64 (MOVLHPSrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src1), (v2i64 VR128:$src2),
MOVHLPS_shuffle_mask:$sm),
(v2i64 (MOVHLPSrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
-def : Pat<(vector_shuffle (v2i64 VR128:$src1), (v2i64 VR128:$src2),
- UNPCKHPD_shuffle_mask:$sm),
- (v2i64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
-def : Pat<(vector_shuffle (v2i64 VR128:$src1), (loadv2i64 addr:$src2),
- MOVLHPSorUNPCKLPD_shuffle_mask:$sm),
+def : Pat<(vector_shuffle (v2i64 VR128:$src1), (load addr:$src2),
+ UNPCKL_shuffle_mask:$sm),
(v2i64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
+def : Pat<(vector_shuffle (v2i64 VR128:$src1), (load addr:$src2),
+ UNPCKH_shuffle_mask:$sm),
+ (v2i64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
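As a usage note for the MOVHPDrm change: movhpd with a memory operand loads 64 bits into the upper half of an XMM register and leaves the lower half untouched, which matches the vector_shuffle-of-a-scalar-load pattern added above. A minimal intrinsics-level sketch of the kind of source this covers (illustrative only, not generated by or part of the patch):

#include <cstdio>
#include <emmintrin.h>

int main() {
  double hi = 2.0;
  __m128d v = _mm_set_sd(1.0);   // xmm = <1.0 (low), 0.0 (high)>
  // _mm_loadh_pd corresponds to MOVHPD: keep the low double and load the
  // high double from memory in one instruction.
  v = _mm_loadh_pd(v, &hi);      // xmm = <1.0 (low), 2.0 (high)>
  double out[2];
  _mm_storeu_pd(out, v);
  std::printf("%g %g\n", out[0], out[1]);  // prints "1 2"
  return 0;
}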