[llvm-commits] CVS: llvm/lib/Target/PowerPC/PPCISelLowering.cpp PPCInstrAltivec.td README_ALTIVEC.txt
Chris Lattner
lattner at cs.uiuc.edu
Sat Apr 15 18:38:09 PDT 2006
Changes in directory llvm/lib/Target/PowerPC:
PPCISelLowering.cpp updated: 1.153 -> 1.154
PPCInstrAltivec.td updated: 1.50 -> 1.51
README_ALTIVEC.txt updated: 1.23 -> 1.24
---
Log message:
Implement a TODO: have the legalizer canonicalize a bunch of operations to
one type (v4i32) so that we don't have to write patterns for each type, and
so that more CSE opportunities are exposed.
---
Diffs of the changes: (+32 -68)
PPCISelLowering.cpp | 33 ++++++++++++++++++++++++---------
PPCInstrAltivec.td | 47 ++++++-----------------------------------------
README_ALTIVEC.txt | 20 ++------------------
3 files changed, 32 insertions(+), 68 deletions(-)
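[Editor's note, not part of the commit: the mechanism behind the log message is the legalizer's Promote action. Instead of writing v16i8/v8i16/v4f32 selection patterns for type-agnostic operations, the target records v4i32 as the canonical type and the legalizer rewrites the other vector types through bit conversions. A minimal sketch of that rewrite for a two-operand bitwise op, assuming the 2006-era SelectionDAG API (SDOperand, MVT::ValueType, ISD::BIT_CONVERT); the helper name is hypothetical and this is an illustration, not code from the patch:

    // Hedged sketch: roughly what the target-independent legalizer does with a
    // bitwise vector op whose action is Promote.  Helper name is hypothetical.
    SDOperand PromoteVectorBitop(SelectionDAG &DAG, SDOperand Op) {
      MVT::ValueType OrigVT = Op.getValueType();     // e.g. v8i16
      MVT::ValueType PromotedVT = MVT::v4i32;        // canonical AltiVec type
      // Reinterpret both operands as v4i32; on AltiVec this is a no-op copy.
      SDOperand LHS = DAG.getNode(ISD::BIT_CONVERT, PromotedVT, Op.getOperand(0));
      SDOperand RHS = DAG.getNode(ISD::BIT_CONVERT, PromotedVT, Op.getOperand(1));
      // Perform the operation in the canonical type...
      SDOperand Res = DAG.getNode(Op.getOpcode(), PromotedVT, LHS, RHS);
      // ...and reinterpret the result back to the original vector type.
      return DAG.getNode(ISD::BIT_CONVERT, OrigVT, Res);
    }

Because every vector and/or/xor now executes in v4i32, operations that previously differed only in value type become identical nodes and get CSE'd, and only the v4i32 selection patterns are needed.]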
Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp
diff -u llvm/lib/Target/PowerPC/PPCISelLowering.cpp:1.153 llvm/lib/Target/PowerPC/PPCISelLowering.cpp:1.154
--- llvm/lib/Target/PowerPC/PPCISelLowering.cpp:1.153 Sat Apr 15 20:01:29 2006
+++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp Sat Apr 15 20:37:57 2006
@@ -87,10 +87,6 @@
setOperationAction(ISD::SELECT, MVT::i32, Expand);
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f64, Expand);
- setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
- setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
- setOperationAction(ISD::SELECT, MVT::v8i16, Expand);
- setOperationAction(ISD::SELECT, MVT::v16i8, Expand);
// PowerPC wants to turn select_cc of FP into fsel when possible.
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
@@ -178,17 +174,29 @@
// will selectively turn on ones that can be effectively codegen'd.
for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
VT != (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
- // add/sub/and/or/xor are legal for all supported vector VT's.
+ // add/sub are legal for all supported vector VT's.
setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::AND , (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::OR , (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::XOR , (MVT::ValueType)VT, Legal);
// We promote all shuffles to v16i8.
setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
- AddPromotedToType(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);
+ AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);
+
+ // We promote all non-typed operations to v4i32.
+ setOperationAction(ISD::AND , (MVT::ValueType)VT, Promote);
+ AddPromotedToType (ISD::AND , (MVT::ValueType)VT, MVT::v4i32);
+ setOperationAction(ISD::OR , (MVT::ValueType)VT, Promote);
+ AddPromotedToType (ISD::OR , (MVT::ValueType)VT, MVT::v4i32);
+ setOperationAction(ISD::XOR , (MVT::ValueType)VT, Promote);
+ AddPromotedToType (ISD::XOR , (MVT::ValueType)VT, MVT::v4i32);
+ setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Promote);
+ AddPromotedToType (ISD::LOAD , (MVT::ValueType)VT, MVT::v4i32);
+ setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
+ AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
+ setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote);
+ AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32);
+ // No other operations are legal.
setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
@@ -205,6 +213,13 @@
// with merges, splats, etc.
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
+ setOperationAction(ISD::AND , MVT::v4i32, Legal);
+ setOperationAction(ISD::OR , MVT::v4i32, Legal);
+ setOperationAction(ISD::XOR , MVT::v4i32, Legal);
+ setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
+ setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
+ setOperationAction(ISD::STORE , MVT::v4i32, Legal);
+
addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
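[Editor's note, not part of the commit: for context on how the entries registered above are consumed, the target-independent legalizer queries the action table per (opcode, type) pair and, on Promote, looks up the recorded destination type. A hedged sketch of that lookup; the TargetLowering method names are my assumption of the contemporaneous interface, and the control flow is simplified:

    // Hedged sketch, not code from the patch: how the legalizer might consult
    // the entries registered above for a vector AND.
    void HandleVectorAnd(TargetLowering &TLI, MVT::ValueType VT) {
      switch (TLI.getOperationAction(ISD::AND, VT)) {
      case TargetLowering::Legal:    // only v4i32 after this patch
        // Selected directly by the v4i32 VAND pattern.
        break;
      case TargetLowering::Promote:  // v16i8, v8i16, v4f32
        // Redo the operation in the promoted type (v4i32), inserting
        // bit_converts around it as in the sketch above, e.g.
        //   MVT::ValueType NVT = TLI.getTypeToPromoteTo(ISD::AND, VT);
        break;
      default:
        break;                       // Expand/Custom are not used for vector AND.
      }
    }
]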
Index: llvm/lib/Target/PowerPC/PPCInstrAltivec.td
diff -u llvm/lib/Target/PowerPC/PPCInstrAltivec.td:1.50 llvm/lib/Target/PowerPC/PPCInstrAltivec.td:1.51
--- llvm/lib/Target/PowerPC/PPCInstrAltivec.td:1.50 Sat Apr 15 18:45:24 2006
+++ llvm/lib/Target/PowerPC/PPCInstrAltivec.td Sat Apr 15 20:37:57 2006
@@ -158,7 +158,7 @@
// Instruction Definitions.
def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
- [(set VRRC:$rD, (v4f32 (undef)))]>;
+ [(set VRRC:$rD, (v4i32 (undef)))]>;
let noResults = 1 in {
def DSS : DSS_Form<822, (ops u5imm:$A, u5imm:$STRM,u5imm:$ZERO1,u5imm:$ZERO2),
@@ -541,25 +541,16 @@
(DSTST 1, imm:$STRM, GPRC:$rA, GPRC:$rB)>;
// Undef.
-def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
-def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
-def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
+def : Pat<(v16i8 (undef)), (IMPLICIT_DEF_VRRC)>;
+def : Pat<(v8i16 (undef)), (IMPLICIT_DEF_VRRC)>;
+def : Pat<(v4f32 (undef)), (IMPLICIT_DEF_VRRC)>;
// Loads.
-def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
-def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;
-def : Pat<(v4f32 (load xoaddr:$src)), (v4f32 (LVX xoaddr:$src))>;
// Stores.
-def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
- (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
-def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
- (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
(STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;
-def : Pat<(store (v4f32 VRRC:$rS), xoaddr:$dst),
- (STVX (v4f32 VRRC:$rS), xoaddr:$dst)>;
// Bit conversions.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
@@ -603,37 +594,11 @@
(VMRGHW VRRC:$vA, VRRC:$vA)>;
// Logical Operations
-def : Pat<(v16i8 (vnot VRRC:$vA)), (v16i8 (VNOR VRRC:$vA, VRRC:$vA))>;
-def : Pat<(v8i16 (vnot VRRC:$vA)), (v8i16 (VNOR VRRC:$vA, VRRC:$vA))>;
def : Pat<(v4i32 (vnot VRRC:$vA)), (v4i32 (VNOR VRRC:$vA, VRRC:$vA))>;
-
-def : Pat<(v16i8 (vnot_conv VRRC:$vA)), (v16i8 (VNOR VRRC:$vA, VRRC:$vA))>;
-def : Pat<(v8i16 (vnot_conv VRRC:$vA)), (v8i16 (VNOR VRRC:$vA, VRRC:$vA))>;
def : Pat<(v4i32 (vnot_conv VRRC:$vA)), (v4i32 (VNOR VRRC:$vA, VRRC:$vA))>;
-
-def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
-def : Pat<(v16i8 (or VRRC:$A, VRRC:$B)), (v16i8 (VOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (or VRRC:$A, VRRC:$B)), (v8i16 (VOR VRRC:$A, VRRC:$B))>;
-
-def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))),(v16i8 (VNOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))),(v8i16 (VNOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
- (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
- (v8i16 (VANDC VRRC:$A, VRRC:$B))>;
-
-
-def : Pat<(v16i8 (vnot_conv (or VRRC:$A, VRRC:$B))),(v16i8 (VNOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (vnot_conv (or VRRC:$A, VRRC:$B))),(v8i16 (VNOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v4i32 (vnot_conv (or VRRC:$A, VRRC:$B))),(v4i32 (VNOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v16i8 (and VRRC:$A, (vnot_conv VRRC:$B))),
- (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (and VRRC:$A, (vnot_conv VRRC:$B))),
- (v8i16 (VANDC VRRC:$A, VRRC:$B))>;
+def : Pat<(v4i32 (vnot_conv (or VRRC:$A, VRRC:$B))),
+ (v4i32 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v4i32 (and VRRC:$A, (vnot_conv VRRC:$B))),
(v4i32 (VANDC VRRC:$A, VRRC:$B))>;
Index: llvm/lib/Target/PowerPC/README_ALTIVEC.txt
diff -u llvm/lib/Target/PowerPC/README_ALTIVEC.txt:1.23 llvm/lib/Target/PowerPC/README_ALTIVEC.txt:1.24
--- llvm/lib/Target/PowerPC/README_ALTIVEC.txt:1.23 Sat Apr 15 20:01:29 2006
+++ llvm/lib/Target/PowerPC/README_ALTIVEC.txt Sat Apr 15 20:37:57 2006
@@ -65,7 +65,7 @@
//===----------------------------------------------------------------------===//
-Implement passing/returning vectors by value.
+Implement passing vectors by value.
//===----------------------------------------------------------------------===//
@@ -75,7 +75,7 @@
//===----------------------------------------------------------------------===//
We currently codegen SCALAR_TO_VECTOR as a store of the scalar to a 16-byte
-aligned stack slot, followed by a lve*x/vperm. We should probably just store it
+aligned stack slot, followed by a load/vperm. We should probably just store it
to a scalar stack slot, then use lvsl/vperm to load it. If the value is already
in memory, this is a huge win.
@@ -92,22 +92,6 @@
//===----------------------------------------------------------------------===//
-Instead of writting a pattern for type-agnostic operations (e.g. gen-zero, load,
-store, and, ...) in every supported type, make legalize do the work. We should
-have a canonical type that we want operations changed to (e.g. v4i32 for
-build_vector) and legalize should change non-identical types to thse. This is
-similar to what it does for operations that are only supported in some types,
-e.g. x86 cmov (not supported on bytes).
-
-This would fix two problems:
-1. Writing patterns multiple times.
-2. Identical operations in different types are not getting CSE'd.
-
-We already do this for shuffle and build_vector. We need load,undef,and,or,xor,
-etc.
-
-//===----------------------------------------------------------------------===//
-
Implement multiply for vector integer types, to avoid the horrible scalarized
code produced by legalize.