[llvm-commits] CVS: llvm/lib/Target/PowerPC/PPCISelLowering.cpp PPCISelLowering.h PPCInstrAltivec.td
Chris Lattner
lattner at cs.uiuc.edu
Tue Apr 4 10:25:43 PDT 2006
Changes in directory llvm/lib/Target/PowerPC:
PPCISelLowering.cpp updated: 1.128 -> 1.129
PPCISelLowering.h updated: 1.38 -> 1.39
PPCInstrAltivec.td updated: 1.32 -> 1.33
---
Log message:
Ask legalize to promote all vector shuffles to be v16i8 instead of having to
handle all 4 PPC vector types. This simplifies the matching code and allows
us to eliminate a bunch of patterns. This also adds cases we were missing,
such as CodeGen/PowerPC/vec_splat.ll:splat_h.
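As a quick illustration of what the promotion means (a standalone sketch, not code from this patch): once every shuffle is treated as v16i8, a shuffle mask over wider elements is just the same mask spelled out byte by byte. The helper name promoteMaskToBytes below is made up for this example, and it assumes legalize expands element index i of an E-byte element into bytes E*i .. E*i+E-1:

#include <cstdio>

// Hypothetical helper: expand a shuffle mask over EltSize-byte elements into
// the 16-entry byte mask that the v16i8 patterns match on, assuming each
// wide element index i becomes its constituent byte indices
// E*i .. E*i+E-1 when the shuffle is promoted.
static void promoteMaskToBytes(const unsigned *EltMask, unsigned NumElts,
                               unsigned ByteMask[16]) {
  unsigned EltSize = 16 / NumElts;
  for (unsigned i = 0; i != NumElts; ++i)
    for (unsigned j = 0; j != EltSize; ++j)
      ByteMask[i * EltSize + j] = EltMask[i] * EltSize + j;
}

int main() {
  // v8i16 mask splatting element 3 (a splat_h-style case).
  unsigned HalfMask[8] = {3, 3, 3, 3, 3, 3, 3, 3};
  unsigned ByteMask[16];
  promoteMaskToBytes(HalfMask, 8, ByteMask);
  for (unsigned i = 0; i != 16; ++i)
    printf("%u ", ByteMask[i]);   // prints 6 7 6 7 ... 6 7
  printf("\n");
  return 0;
}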
---
Diffs of the changes: (+68 -45)
PPCISelLowering.cpp | 56 ++++++++++++++++++++++++++++++++++++----------------
PPCISelLowering.h | 4 +--
PPCInstrAltivec.td | 53 +++++++++++++++++++++++++------------------------
3 files changed, 68 insertions(+), 45 deletions(-)
Index: llvm/lib/Target/PowerPC/PPCISelLowering.cpp
diff -u llvm/lib/Target/PowerPC/PPCISelLowering.cpp:1.128 llvm/lib/Target/PowerPC/PPCISelLowering.cpp:1.129
--- llvm/lib/Target/PowerPC/PPCISelLowering.cpp:1.128 Mon Apr 3 18:58:04 2006
+++ llvm/lib/Target/PowerPC/PPCISelLowering.cpp Tue Apr 4 12:25:31 2006
@@ -181,8 +181,9 @@
setOperationAction(ISD::OR , (MVT::ValueType)VT, Legal);
setOperationAction(ISD::XOR , (MVT::ValueType)VT, Legal);
- // We can custom expand all VECTOR_SHUFFLEs to VPERM.
- setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
+ // We promote all shuffles to v16i8.
+ setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
+ AddPromotedToType(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);
setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
@@ -196,6 +197,10 @@
setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
}
+ // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
+ // with merges, splats, etc.
+ setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
+
addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
@@ -266,33 +271,47 @@
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
-bool PPC::isSplatShuffleMask(SDNode *N) {
- assert(N->getOpcode() == ISD::BUILD_VECTOR);
-
- // We can only splat 8-bit, 16-bit, and 32-bit quantities.
- if (N->getNumOperands() != 4 && N->getNumOperands() != 8 &&
- N->getNumOperands() != 16)
- return false;
+bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
+ assert(N->getOpcode() == ISD::BUILD_VECTOR &&
+ N->getNumOperands() == 16 &&
+ (EltSize == 1 || EltSize == 2 || EltSize == 4));
// This is a splat operation if each element of the permute is the same, and
// if the value doesn't reference the second vector.
+ unsigned ElementBase = 0;
SDOperand Elt = N->getOperand(0);
+ if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
+ ElementBase = EltV->getValue();
+ else
+ return false; // FIXME: Handle UNDEF elements too!
+
+ if (cast<ConstantSDNode>(Elt)->getValue() >= 16)
+ return false;
+
+ // Check that they are consecutive.
+ for (unsigned i = 1; i != EltSize; ++i) {
+ if (!isa<ConstantSDNode>(N->getOperand(i)) ||
+ cast<ConstantSDNode>(N->getOperand(i))->getValue() != i+ElementBase)
+ return false;
+ }
+
assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
- for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) {
+ for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
assert(isa<ConstantSDNode>(N->getOperand(i)) &&
"Invalid VECTOR_SHUFFLE mask!");
- if (N->getOperand(i) != Elt) return false;
+ for (unsigned j = 0; j != EltSize; ++j)
+ if (N->getOperand(i+j) != N->getOperand(j))
+ return false;
}
- // Make sure it is a splat of the first vector operand.
- return cast<ConstantSDNode>(Elt)->getValue() < N->getNumOperands();
+ return true;
}
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
-unsigned PPC::getVSPLTImmediate(SDNode *N) {
- assert(isSplatShuffleMask(N));
- return cast<ConstantSDNode>(N->getOperand(0))->getValue();
+unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
+ assert(isSplatShuffleMask(N, EltSize));
+ return cast<ConstantSDNode>(N->getOperand(0))->getValue() / EltSize;
}
/// isVecSplatImm - Return true if this is a build_vector of constants which
@@ -734,7 +753,10 @@
// Cases that are handled by instructions that take permute immediates
// (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
// selected by the instruction selector.
- if (PPC::isSplatShuffleMask(PermMask.Val) && V2.getOpcode() == ISD::UNDEF)
+ if (V2.getOpcode() == ISD::UNDEF &&
+ (PPC::isSplatShuffleMask(PermMask.Val, 1) ||
+ PPC::isSplatShuffleMask(PermMask.Val, 2) ||
+ PPC::isSplatShuffleMask(PermMask.Val, 4)))
break;
// TODO: Handle more cases, and also handle cases that are cheaper to do as
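For reference, here is a minimal standalone sketch of the byte-level splat test that the reworked isSplatShuffleMask performs on a promoted v16i8 mask, and of how getVSPLTImmediate recovers the vsplt* element index from it. The names isSplatMask/getSplatImm and the example mask are hypothetical; this just restates the logic of the changes above outside the SelectionDAG types:

#include <cassert>
#include <cstdio>

// Mask[] holds the 16 constant byte indices of a promoted shuffle mask;
// EltSize is 1 (vspltb), 2 (vsplth) or 4 (vspltw).
static bool isSplatMask(const unsigned Mask[16], unsigned EltSize) {
  assert(EltSize == 1 || EltSize == 2 || EltSize == 4);
  unsigned ElementBase = Mask[0];
  if (ElementBase >= 16)
    return false;                     // must splat from the first vector operand
  for (unsigned i = 1; i != EltSize; ++i)   // bytes of one element are consecutive
    if (Mask[i] != ElementBase + i)
      return false;
  for (unsigned i = EltSize; i != 16; i += EltSize)  // every element repeats the first
    for (unsigned j = 0; j != EltSize; ++j)
      if (Mask[i + j] != Mask[j])
        return false;
  return true;
}

// The VSPLT* immediate is the element index: first byte index / element size.
static unsigned getSplatImm(const unsigned Mask[16], unsigned EltSize) {
  assert(isSplatMask(Mask, EltSize));
  return Mask[0] / EltSize;
}

int main() {
  // Promoted mask for splatting halfword element 3 of a v8i16 vector.
  unsigned HalfwordSplat[16] = {6,7, 6,7, 6,7, 6,7, 6,7, 6,7, 6,7, 6,7};
  assert( isSplatMask(HalfwordSplat, 2));
  assert(!isSplatMask(HalfwordSplat, 4));  // not a word splat
  printf("vsplth immediate: %u\n", getSplatImm(HalfwordSplat, 2)); // prints 3
  return 0;
}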
Index: llvm/lib/Target/PowerPC/PPCISelLowering.h
diff -u llvm/lib/Target/PowerPC/PPCISelLowering.h:1.38 llvm/lib/Target/PowerPC/PPCISelLowering.h:1.39
--- llvm/lib/Target/PowerPC/PPCISelLowering.h:1.38 Sun Apr 2 00:26:07 2006
+++ llvm/lib/Target/PowerPC/PPCISelLowering.h Tue Apr 4 12:25:31 2006
@@ -105,11 +105,11 @@
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
- bool isSplatShuffleMask(SDNode *N);
+ bool isSplatShuffleMask(SDNode *N, unsigned EltSize);
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
- unsigned getVSPLTImmediate(SDNode *N);
+ unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);
/// isVecSplatImm - Return true if this is a build_vector of constants which
/// can be formed by using a vspltis[bhw] instruction. The ByteSize field
Index: llvm/lib/Target/PowerPC/PPCInstrAltivec.td
diff -u llvm/lib/Target/PowerPC/PPCInstrAltivec.td:1.32 llvm/lib/Target/PowerPC/PPCInstrAltivec.td:1.33
--- llvm/lib/Target/PowerPC/PPCInstrAltivec.td:1.32 Mon Apr 3 19:05:13 2006
+++ llvm/lib/Target/PowerPC/PPCInstrAltivec.td Tue Apr 4 12:25:31 2006
@@ -15,14 +15,25 @@
// Altivec transformation functions and pattern fragments.
//
-// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
-def VSPLT_get_imm : SDNodeXForm<build_vector, [{
- return getI32Imm(PPC::getVSPLTImmediate(N));
+// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
+def VSPLTB_get_imm : SDNodeXForm<build_vector, [{
+ return getI32Imm(PPC::getVSPLTImmediate(N, 1));
}]>;
-
-def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isSplatShuffleMask(N);
-}], VSPLT_get_imm>;
+def VSPLTB_shuffle_mask : PatLeaf<(build_vector), [{
+ return PPC::isSplatShuffleMask(N, 1);
+}], VSPLTB_get_imm>;
+def VSPLTH_get_imm : SDNodeXForm<build_vector, [{
+ return getI32Imm(PPC::getVSPLTImmediate(N, 2));
+}]>;
+def VSPLTH_shuffle_mask : PatLeaf<(build_vector), [{
+ return PPC::isSplatShuffleMask(N, 2);
+}], VSPLTH_get_imm>;
+def VSPLTW_get_imm : SDNodeXForm<build_vector, [{
+ return getI32Imm(PPC::getVSPLTImmediate(N, 4));
+}]>;
+def VSPLTW_shuffle_mask : PatLeaf<(build_vector), [{
+ return PPC::isSplatShuffleMask(N, 4);
+}], VSPLTW_get_imm>;
// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
@@ -55,11 +66,6 @@
return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;
-class isVDOT { // vector dot instruction.
- list<Register> Defs = [CR6];
- bit RC = 1;
-}
-
//===----------------------------------------------------------------------===//
// Helpers for defining instructions that directly correspond to intrinsics.
@@ -294,15 +300,15 @@
def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
"vspltb $vD, $vB, $UIMM", VecPerm,
[(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
- VSPLT_shuffle_mask:$UIMM))]>;
+ VSPLTB_shuffle_mask:$UIMM))]>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
"vsplth $vD, $vB, $UIMM", VecPerm,
- [(set VRRC:$vD, (vector_shuffle (v8i16 VRRC:$vB), (undef),
- VSPLT_shuffle_mask:$UIMM))]>;
+ [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
+ VSPLTH_shuffle_mask:$UIMM))]>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
"vspltw $vD, $vB, $UIMM", VecPerm,
- [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef),
- VSPLT_shuffle_mask:$UIMM))]>;
+ [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
+ VSPLTW_shuffle_mask:$UIMM))]>;
def VSR : VX1_Int< 708, "vsr" , int_ppc_altivec_vsr>;
def VSRO : VX1_Int<1100, "vsro" , int_ppc_altivec_vsro>;
@@ -355,7 +361,10 @@
[(set VRRC:$vD, (Ty (PPCvcmp VRRC:$vA, VRRC:$vB, xo)))]>;
class VCMPo<bits<10> xo, string asmstr, ValueType Ty>
: VXRForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
- [(set VRRC:$vD, (Ty (PPCvcmp_o VRRC:$vA, VRRC:$vB, xo)))]>,isVDOT;
+ [(set VRRC:$vD, (Ty (PPCvcmp_o VRRC:$vA, VRRC:$vB, xo)))]> {
+ let Defs = [CR6];
+ let RC = 1;
+}
// f32 element comparisons.
def VCMPBFP : VCMP <966, "vcmpbfp $vD, $vA, $vB" , v4f32>;
@@ -487,14 +496,6 @@
(VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
(VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;
-def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
- (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;
-def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
- (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;
-def : Pat<(PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
- (v4f32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;
-def : Pat<(PPCvperm (v8i16 VRRC:$vA), VRRC:$vB, VRRC:$vC),
- (v8i16 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;
def : Pat<(PPCvperm (v16i8 VRRC:$vA), VRRC:$vB, VRRC:$vC),
(v16i8 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;