[llvm-commits] CVS: llvm/lib/Target/X86/X86ISelLowering.cpp X86InstrMMX.td
Bill Wendling
isanbard at gmail.com
Tue Mar 6 10:54:00 PST 2007
Changes in directory llvm/lib/Target/X86:
X86ISelLowering.cpp updated: 1.361 -> 1.362
X86InstrMMX.td updated: 1.14 -> 1.15
---
Log message:
Add LOAD/STORE support for MMX.
---
Diffs of the changes: (+47 -22)
X86ISelLowering.cpp |  3 ++
X86InstrMMX.td      | 66 ++++++++++++++++++++++++++++++++++------------------
2 files changed, 47 insertions(+), 22 deletions(-)
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
diff -u llvm/lib/Target/X86/X86ISelLowering.cpp:1.361 llvm/lib/Target/X86/X86ISelLowering.cpp:1.362
--- llvm/lib/Target/X86/X86ISelLowering.cpp:1.361 Tue Mar 6 02:12:33 2007
+++ llvm/lib/Target/X86/X86ISelLowering.cpp Tue Mar 6 12:53:42 2007
@@ -327,6 +327,9 @@
addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
// FIXME: add MMX packed arithmetics
+ setOperationAction(ISD::LOAD, MVT::v8i8, Legal);
+ setOperationAction(ISD::LOAD, MVT::v4i16, Legal);
+ setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Expand);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Expand);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Expand);
Index: llvm/lib/Target/X86/X86InstrMMX.td
diff -u llvm/lib/Target/X86/X86InstrMMX.td:1.14 llvm/lib/Target/X86/X86InstrMMX.td:1.15
--- llvm/lib/Target/X86/X86InstrMMX.td:1.14 Mon Mar 5 17:09:45 2007
+++ llvm/lib/Target/X86/X86InstrMMX.td Tue Mar 6 12:53:42 2007
@@ -13,7 +13,10 @@
//
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
// Instruction templates
+//===----------------------------------------------------------------------===//
+
// MMXI - MMX instructions with TB prefix.
// MMX2I - MMX / SSE2 instructions with TB and OpSize prefixes.
// MMXIi8 - MMX instructions with ImmT == Imm8 and TB prefix.
@@ -30,33 +33,42 @@
[(set VR64:$dst, (v8i8 (undef)))]>,
Requires<[HasMMX]>;
+def : Pat<(v8i8 (undef)), (IMPLICIT_DEF_VR64)>, Requires<[HasMMX]>;
def : Pat<(v4i16 (undef)), (IMPLICIT_DEF_VR64)>, Requires<[HasMMX]>;
def : Pat<(v2i32 (undef)), (IMPLICIT_DEF_VR64)>, Requires<[HasMMX]>;
-// EMMS
-def EMMS : I<0x77, RawFrm, (ops), "emms", [(int_x86_mmx_emms)]>, TB,
- Requires<[HasMMX]>;
+//===----------------------------------------------------------------------===//
+// MMX Pattern Fragments
+//===----------------------------------------------------------------------===//
-// Move Instructions
-def MOVD64rr : I<0x6E, MRMSrcReg, (ops VR64:$dst, GR32:$src),
- "movd {$src, $dst|$dst, $src}", []>, TB,
- Requires<[HasMMX]>;
-def MOVD64rm : I<0x6E, MRMSrcMem, (ops VR64:$dst, i32mem:$src),
- "movd {$src, $dst|$dst, $src}", []>, TB,
- Requires<[HasMMX]>;
-def MOVD64mr : I<0x7E, MRMDestMem, (ops i32mem:$dst, VR64:$src),
- "movd {$src, $dst|$dst, $src}", []>, TB,
- Requires<[HasMMX]>;
+def loadv2i32 : PatFrag<(ops node:$ptr), (v2i32 (load node:$ptr))>;
-def MOVQ64rr : I<0x6F, MRMSrcReg, (ops VR64:$dst, VR64:$src),
- "movq {$src, $dst|$dst, $src}", []>, TB,
- Requires<[HasMMX]>;
-def MOVQ64rm : I<0x6F, MRMSrcMem, (ops VR64:$dst, i64mem:$src),
- "movq {$src, $dst|$dst, $src}", []>, TB,
- Requires<[HasMMX]>;
-def MOVQ64mr : I<0x7F, MRMDestMem, (ops i64mem:$dst, VR64:$src),
- "movq {$src, $dst|$dst, $src}", []>, TB,
- Requires<[HasMMX]>;
+//===----------------------------------------------------------------------===//
+// MMX EMMS Instruction
+//===----------------------------------------------------------------------===//
+
+def EMMS : MMXI<0x77, RawFrm, (ops), "emms", [(int_x86_mmx_emms)]>;
+
+//===----------------------------------------------------------------------===//
+// MMX Scalar Instructions
+//===----------------------------------------------------------------------===//
+
+// Move Instructions
+def MOVD64rr : MMXI<0x6E, MRMSrcReg, (ops VR64:$dst, GR32:$src),
+ "movd {$src, $dst|$dst, $src}", []>;
+def MOVD64rm : MMXI<0x6E, MRMSrcMem, (ops VR64:$dst, i32mem:$src),
+ "movd {$src, $dst|$dst, $src}", []>;
+def MOVD64mr : MMXI<0x7E, MRMDestMem, (ops i32mem:$dst, VR64:$src),
+ "movd {$src, $dst|$dst, $src}", []>;
+
+def MOVQ64rr : MMXI<0x6F, MRMSrcReg, (ops VR64:$dst, VR64:$src),
+ "movq {$src, $dst|$dst, $src}", []>;
+def MOVQ64rm : MMXI<0x6F, MRMSrcMem, (ops VR64:$dst, i64mem:$src),
+ "movq {$src, $dst|$dst, $src}",
+ [(set VR64:$dst, (loadv2i32 addr:$src))]>;
+def MOVQ64mr : MMXI<0x7F, MRMDestMem, (ops i64mem:$dst, VR64:$src),
+ "movq {$src, $dst|$dst, $src}",
+ [(store (v2i32 VR64:$src), addr:$dst)]>;
// Conversion instructions
def CVTPI2PSrr : MMXI<0x2A, MRMSrcReg, (ops VR128:$dst, VR64:$src),
@@ -98,3 +110,13 @@
def MASKMOVQ : I<0xF7, MRMDestMem, (ops VR64:$src, VR64:$mask),
"maskmovq {$mask, $src|$src, $mask}", []>, TB,
Requires<[HasMMX]>;
+
+//===----------------------------------------------------------------------===//
+// Non-Instruction Patterns
+//===----------------------------------------------------------------------===//
+
+// Store 64-bit integer vector values.
+def : Pat<(store (v8i8 VR64:$src), addr:$dst),
+ (MOVQ64mr addr:$dst, VR64:$src)>;
+def : Pat<(store (v4i16 VR64:$src), addr:$dst),
+ (MOVQ64mr addr:$dst, VR64:$src)>;
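
For illustration only (not part of this commit): this patch adds a load
pattern for v2i32 (via loadv2i32 on MOVQ64rm) and store patterns for all
three 64-bit vector types. If loads of the remaining types were wanted,
they could presumably be wired up to MOVQ64rm in the same style as the
store patterns above, e.g.:

def : Pat<(v8i8  (load addr:$src)),
          (MOVQ64rm addr:$src)>;
def : Pat<(v4i16 (load addr:$src)),
          (MOVQ64rm addr:$src)>;

This is just a sketch by analogy with the v2i32 handling; the actual
commit only contains the patterns shown in the diff.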