[llvm-commits] CVS: llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
Nate Begeman
natebegeman at mac.com
Wed Nov 30 00:22:41 PST 2005
Changes in directory llvm/lib/Target/PowerPC:
PPCISelDAGToDAG.cpp updated: 1.130 -> 1.131
---
Log message:
First chunk of actually generating vector code for packed types. These
changes allow us to generate the following code:
_foo:
li r2, 0
lvx v0, r2, r3
vaddfp v0, v0, v0
stvx v0, r2, r3
blr
for this LLVM code:
void %foo(<4 x float>* %a) {
entry:
%tmp1 = load <4 x float>* %a
%tmp2 = add <4 x float> %tmp1, %tmp1
store <4 x float> %tmp2, <4 x float>* %a
ret void
}
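(Not part of the commit, but for reference: with a compiler that supports the
AltiVec C language extensions, an equivalent C source for the IR above is
roughly the following; the function name "foo" matches the IR, the rest is
just an illustration.)

#include <altivec.h>

/* Load the 4 x float vector, add it to itself, and store it back. */
void foo(vector float *a) {
  *a = vec_add(*a, *a);
}

(The "li r2, 0" in the generated code materializes a zero index register,
since lvx/stvx only exist in the indexed [r+r] form and cannot take an
immediate offset.)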
---
Diffs of the changes: (+51 -11)
PPCISelDAGToDAG.cpp | 62 ++++++++++++++++++++++++++++++++++++++++++----------
1 files changed, 51 insertions(+), 11 deletions(-)
Index: llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
diff -u llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp:1.130 llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp:1.131
--- llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp:1.130 Thu Nov 17 12:26:56 2005
+++ llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp Wed Nov 30 02:22:07 2005
@@ -73,6 +73,11 @@
/// load/store instruction, and return true if it should be an indexed [r+r]
/// operation.
bool SelectAddr(SDOperand Addr, SDOperand &Op1, SDOperand &Op2);
+
+ /// SelectAddrIndexed - Given the specified address, force it to be
+ /// represented as an indexed [r+r] operation, rather than possibly
+ /// returning [r+imm] as SelectAddr may.
+ void SelectAddrIndexed(SDOperand Addr, SDOperand &Op1, SDOperand &Op2);
SDOperand BuildSDIVSequence(SDNode *N);
SDOperand BuildUDIVSequence(SDNode *N);
@@ -428,7 +433,7 @@
}
}
- if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Addr)) {
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Addr)) {
Op1 = getI32Imm(0);
Op2 = CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32);
return false;
@@ -445,6 +450,26 @@
return false;
}
+/// SelectAddrIndexed - Given the specified address, force it to be
+/// represented as an indexed [r+r] operation, rather than possibly
+/// returning [r+imm] as SelectAddr may.
+void PPCDAGToDAGISel::SelectAddrIndexed(SDOperand Addr, SDOperand &Op1,
+ SDOperand &Op2) {
+ if (Addr.getOpcode() == ISD::ADD) {
+ Op1 = Select(Addr.getOperand(0));
+ Op2 = Select(Addr.getOperand(1));
+ return;
+ }
+
+ if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Op1 = CurDAG->getTargetNode(PPC::LI, MVT::i32, getI32Imm(0));
+ Op2 = CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32);
+ return;
+ }
+ Op1 = CurDAG->getTargetNode(PPC::LI, MVT::i32, getI32Imm(0));
+ Op2 = Select(Addr);
+}
+
/// SelectCC - Select a comparison of the specified values with the specified
/// condition code, returning the CR# of the expression.
SDOperand PPCDAGToDAGISel::SelectCC(SDOperand LHS, SDOperand RHS,
@@ -916,9 +941,8 @@
}
}
- CurDAG->SelectNodeTo(N, Ty == MVT::f64 ? PPC::FADD : PPC::FADDS, Ty,
- Select(N->getOperand(0)), Select(N->getOperand(1)));
- return SDOperand(N, 0);
+ // Other cases are autogenerated.
+ break;
}
case ISD::FSUB: {
MVT::ValueType Ty = N->getValueType(0);
@@ -942,10 +966,9 @@
return SDOperand(N, 0);
}
}
- CurDAG->SelectNodeTo(N, Ty == MVT::f64 ? PPC::FSUB : PPC::FSUBS, Ty,
- Select(N->getOperand(0)),
- Select(N->getOperand(1)));
- return SDOperand(N, 0);
+
+ // Other cases are autogenerated.
+ break;
}
case ISD::SDIV: {
// FIXME: since this depends on the setting of the carry flag from the srawi
@@ -1074,10 +1097,17 @@
case ISD::ZEXTLOAD:
case ISD::SEXTLOAD: {
SDOperand Op1, Op2;
- bool isIdx = SelectAddr(N->getOperand(1), Op1, Op2);
-
+ // If this is a vector load, then force this to be indexed addressing, since
+ // altivec does not have immediate offsets for loads.
+ bool isIdx = true;
+ if (N->getOpcode() == ISD::LOAD && MVT::isVector(N->getValueType(0))) {
+ SelectAddrIndexed(N->getOperand(1), Op1, Op2);
+ } else {
+ isIdx = SelectAddr(N->getOperand(1), Op1, Op2);
+ }
MVT::ValueType TypeBeingLoaded = (N->getOpcode() == ISD::LOAD) ?
N->getValueType(0) : cast<VTSDNode>(N->getOperand(3))->getVT();
+
unsigned Opc;
switch (TypeBeingLoaded) {
default: N->dump(); assert(0 && "Cannot load this type!");
@@ -1093,6 +1123,7 @@
case MVT::i32: Opc = isIdx ? PPC::LWZX : PPC::LWZ; break;
case MVT::f32: Opc = isIdx ? PPC::LFSX : PPC::LFS; break;
case MVT::f64: Opc = isIdx ? PPC::LFDX : PPC::LFD; break;
+ case MVT::v4f32: Opc = PPC::LVX; break;
}
// If this is an f32 -> f64 load, emit the f32 load, then use an 'extending
@@ -1119,7 +1150,15 @@
case ISD::TRUNCSTORE:
case ISD::STORE: {
SDOperand AddrOp1, AddrOp2;
- bool isIdx = SelectAddr(N->getOperand(2), AddrOp1, AddrOp2);
+ // If this is a vector store, then force this to be indexed addressing,
+ // since altivec does not have immediate offsets for stores.
+ bool isIdx = true;
+ if (N->getOpcode() == ISD::STORE &&
+ MVT::isVector(N->getOperand(1).getValueType())) {
+ SelectAddrIndexed(N->getOperand(2), AddrOp1, AddrOp2);
+ } else {
+ isIdx = SelectAddr(N->getOperand(2), AddrOp1, AddrOp2);
+ }
unsigned Opc;
if (N->getOpcode() == ISD::STORE) {
@@ -1128,6 +1167,7 @@
case MVT::i32: Opc = isIdx ? PPC::STWX : PPC::STW; break;
case MVT::f64: Opc = isIdx ? PPC::STFDX : PPC::STFD; break;
case MVT::f32: Opc = isIdx ? PPC::STFSX : PPC::STFS; break;
+ case MVT::v4f32: Opc = PPC::STVX; break;
}
} else { //ISD::TRUNCSTORE
switch(cast<VTSDNode>(N->getOperand(4))->getVT()) {