[llvm-commits] [llvm] r56066 - in /llvm/trunk: include/llvm/CodeGen/FastISel.h include/llvm/Target/TargetLowering.h lib/CodeGen/SelectionDAG/FastISel.cpp lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp lib/Target/X86/X86FastISel.cpp lib/Target/X86/X86ISelLowering.cpp lib/Target/X86/X86ISelLowering.h
Dan Gohman
gohman at apple.com
Wed Sep 10 13:11:02 PDT 2008
Author: djg
Date: Wed Sep 10 15:11:02 2008
New Revision: 56066
URL: http://llvm.org/viewvc/llvm-project?rev=56066&view=rev
Log:
Add X86FastISel support for static allocas, and references
to static allocas. As part of this change, refactor the
address mode code for loads and stores.
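
For illustration, a minimal C++ sketch (not taken from the patch) of the kind of code this affects: a fixed-size local in the entry block becomes a static alloca, and X86FastISel can now materialize its address from a frame index instead of bailing out of fast selection.

// Illustrative only; not part of the patch.
int sum16(void) {
  int buf[16];                  // fixed size, entry block: a static alloca
  for (int i = 0; i != 16; ++i)
    buf[i] = i;                 // stores through an alloca-based address
  int s = 0;
  for (int i = 0; i != 16; ++i)
    s += buf[i];                // loads through the same address
  return s;
}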
Modified:
llvm/trunk/include/llvm/CodeGen/FastISel.h
llvm/trunk/include/llvm/Target/TargetLowering.h
llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
llvm/trunk/lib/Target/X86/X86FastISel.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.h
Modified: llvm/trunk/include/llvm/CodeGen/FastISel.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/FastISel.h?rev=56066&r1=56065&r2=56066&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/FastISel.h (original)
+++ llvm/trunk/include/llvm/CodeGen/FastISel.h Wed Sep 10 15:11:02 2008
@@ -20,10 +20,12 @@
namespace llvm {
+class AllocaInst;
class ConstantFP;
class MachineBasicBlock;
class MachineConstantPool;
class MachineFunction;
+class MachineFrameInfo;
class MachineRegisterInfo;
class TargetData;
class TargetInstrInfo;
@@ -40,8 +42,11 @@
DenseMap<const Value *, unsigned> LocalValueMap;
DenseMap<const Value *, unsigned> &ValueMap;
DenseMap<const BasicBlock *, MachineBasicBlock *> &MBBMap;
+ DenseMap<const AllocaInst *, int> &StaticAllocaMap;
MachineFunction &MF;
MachineRegisterInfo &MRI;
+ MachineFrameInfo &MFI;
+ MachineConstantPool &MCP;
const TargetMachine &TM;
const TargetData &TD;
const TargetInstrInfo &TII;
@@ -90,7 +95,8 @@
protected:
FastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm);
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
+ DenseMap<const AllocaInst *, int> &am);
/// FastEmit_r - This method is called by target-independent code
/// to request that an instruction with the given type and opcode
@@ -234,8 +240,13 @@
/// TargetMaterializeConstant - Emit a constant in a register using
/// target-specific logic, such as constant pool loads.
- virtual unsigned TargetMaterializeConstant(Constant* C,
- MachineConstantPool* MCP) {
+ virtual unsigned TargetMaterializeConstant(Constant* C) {
+ return 0;
+ }
+
+ /// TargetMaterializeAlloca - Emit an alloca address in a register using
+ /// target-specific logic.
+ virtual unsigned TargetMaterializeAlloca(AllocaInst* C) {
return 0;
}
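
To show how the reworked hooks are meant to be used, here is a hedged sketch of a hypothetical target subclass (the class name and the empty bodies are placeholders, not from the patch; X86FastISel below is the real implementation):

#include "llvm/CodeGen/FastISel.h"
using namespace llvm;

class FooFastISel : public FastISel {
public:
  FooFastISel(MachineFunction &mf,
              DenseMap<const Value *, unsigned> &vm,
              DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
              DenseMap<const AllocaInst *, int> &am)
    : FastISel(mf, vm, bm, am) {}

  // Emit a constant-pool load; MCP is now a FastISel member, so no
  // MachineConstantPool parameter is passed in anymore.
  virtual unsigned TargetMaterializeConstant(Constant *C) {
    return 0; // placeholder: return the result register, or 0 on failure
  }

  // Emit an address computation for a static alloca from StaticAllocaMap.
  virtual unsigned TargetMaterializeAlloca(AllocaInst *A) {
    return 0; // placeholder: return the result register, or 0 on failure
  }
};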
Modified: llvm/trunk/include/llvm/Target/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetLowering.h?rev=56066&r1=56065&r2=56066&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Target/TargetLowering.h (original)
+++ llvm/trunk/include/llvm/Target/TargetLowering.h Wed Sep 10 15:11:02 2008
@@ -33,6 +33,7 @@
#include <vector>
namespace llvm {
+ class AllocaInst;
class Function;
class FastISel;
class MachineBasicBlock;
@@ -1120,7 +1121,8 @@
virtual FastISel *
createFastISel(MachineFunction &,
DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &) {
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &,
+ DenseMap<const AllocaInst *, int> &) {
return 0;
}
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp?rev=56066&r1=56065&r2=56066&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp Wed Sep 10 15:11:02 2008
@@ -37,14 +37,14 @@
return 0;
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getValue().getActiveBits() > 64)
- return TargetMaterializeConstant(CI,
- MBB->getParent()->getConstantPool());
+ return TargetMaterializeConstant(CI);
// Don't cache constant materializations. To do so would require
// tracking what uses they dominate.
Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<GlobalValue>(V)) {
- return TargetMaterializeConstant(dyn_cast<Constant>(V),
- MBB->getParent()->getConstantPool());
+ return TargetMaterializeConstant(cast<Constant>(V));
+ } else if (isa<AllocaInst>(V)) {
+ return TargetMaterializeAlloca(cast<AllocaInst>(V));
} else if (isa<ConstantPointerNull>(V)) {
Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
@@ -58,19 +58,16 @@
uint32_t IntBitWidth = IntVT.getSizeInBits();
if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
APFloat::rmTowardZero) != APFloat::opOK)
- return TargetMaterializeConstant(CF,
- MBB->getParent()->getConstantPool());
+ return TargetMaterializeConstant(CF);
APInt IntVal(IntBitWidth, 2, x);
unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
ISD::Constant, IntVal.getZExtValue());
if (IntegerReg == 0)
- return TargetMaterializeConstant(CF,
- MBB->getParent()->getConstantPool());
+ return TargetMaterializeConstant(CF);
Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
if (Reg == 0)
- return TargetMaterializeConstant(CF,
- MBB->getParent()->getConstantPool());;
+ return TargetMaterializeConstant(CF);
}
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (!SelectOperator(CE, CE->getOpcode())) return 0;
@@ -83,8 +80,7 @@
}
if (!Reg && isa<Constant>(V))
- return TargetMaterializeConstant(cast<Constant>(V),
- MBB->getParent()->getConstantPool());
+ return TargetMaterializeConstant(cast<Constant>(V));
LocalValueMap[V] = Reg;
return Reg;
@@ -416,6 +412,14 @@
case Instruction::PHI:
// PHI nodes are already emitted.
return true;
+
+ case Instruction::Alloca:
+ // FunctionLowering has the static-sized case covered.
+ if (StaticAllocaMap.count(cast<AllocaInst>(I)))
+ return true;
+
+ // Dynamic-sized alloca is not handled yet.
+ return false;
case Instruction::BitCast:
return SelectBitCast(I);
@@ -453,12 +457,16 @@
FastISel::FastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
+ DenseMap<const AllocaInst *, int> &am)
: MBB(0),
ValueMap(vm),
MBBMap(bm),
+ StaticAllocaMap(am),
MF(mf),
MRI(MF.getRegInfo()),
+ MFI(*MF.getFrameInfo()),
+ MCP(*MF.getConstantPool()),
TM(MF.getTarget()),
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
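
The new Instruction::Alloca case only succeeds for allocas that FunctionLoweringInfo has already entered in StaticAllocaMap. As a rough source-level illustration (assumed, not from the patch), the two kinds of alloca look like this:

void static_case(void) {
  char buf[64];                 // constant size in the entry block:
  buf[0] = 'x';                 // gets a frame index in StaticAllocaMap
}

void dynamic_case(unsigned n) {
  char *p = (char *)__builtin_alloca(n);  // size known only at run time:
  p[0] = 'x';                             // SelectOperator still returns false
}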
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp?rev=56066&r1=56065&r2=56066&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp Wed Sep 10 15:11:02 2008
@@ -737,7 +737,8 @@
// FastISel doesn't support EH landing pads, which require special handling.
if (EnableFastISel && !BB->isLandingPad()) {
if (FastISel *F = TLI.createFastISel(*FuncInfo->MF, FuncInfo->ValueMap,
- FuncInfo->MBBMap)) {
+ FuncInfo->MBBMap,
+ FuncInfo->StaticAllocaMap)) {
// Emit code for any incoming arguments. This must happen before
// beginning FastISel on the entry block.
if (LLVMBB == &Fn.getEntryBlock()) {
Modified: llvm/trunk/lib/Target/X86/X86FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FastISel.cpp?rev=56066&r1=56065&r2=56066&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp Wed Sep 10 15:11:02 2008
@@ -31,10 +31,6 @@
using namespace llvm;
class X86FastISel : public FastISel {
- /// MFI - Keep track of objects allocated on the stack.
- ///
- MachineFrameInfo *MFI;
-
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
@@ -53,8 +49,9 @@
public:
explicit X86FastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
- : FastISel(mf, vm, bm), MFI(MF.getFrameInfo()) {
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
+ DenseMap<const AllocaInst *, int> &am)
+ : FastISel(mf, vm, bm, am) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
X86ScalarSSEf64 = Subtarget->hasSSE2();
@@ -66,10 +63,10 @@
#include "X86GenFastISel.inc"
private:
- bool X86FastEmitLoad(MVT VT, unsigned Op0, Value *V, unsigned &RR);
+ bool X86FastEmitLoad(MVT VT, const X86AddressMode &AM, unsigned &RR);
bool X86FastEmitStore(MVT VT, unsigned Val,
- unsigned Ptr, unsigned Offset, Value *V);
+ const X86AddressMode &AM);
bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
unsigned &ResultReg);
@@ -77,6 +74,8 @@
bool X86SelectConstAddr(Value *V, unsigned &Op0,
bool isCall = false, bool inReg = false);
+ bool X86SelectAddress(Value *V, X86AddressMode &AM);
+
bool X86SelectLoad(Instruction *I);
bool X86SelectStore(Instruction *I);
@@ -97,7 +96,9 @@
CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);
- unsigned TargetMaterializeConstant(Constant *C, MachineConstantPool* MCP);
+ unsigned TargetMaterializeConstant(Constant *C);
+
+ unsigned TargetMaterializeAlloca(AllocaInst *C);
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
@@ -151,7 +152,7 @@
/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
-bool X86FastISel::X86FastEmitLoad(MVT VT, unsigned Ptr, Value *GV,
+bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
unsigned &ResultReg) {
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
@@ -200,12 +201,6 @@
}
ResultReg = createResultReg(RC);
- X86AddressMode AM;
- if (Ptr)
- // Address is in register.
- AM.Base.Reg = Ptr;
- else
- AM.GV = cast<GlobalValue>(GV);
addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
return true;
}
@@ -216,7 +211,7 @@
/// i.e. V. Return true if it is possible.
bool
X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
- unsigned Ptr, unsigned Offset, Value *V) {
+ const X86AddressMode &AM) {
// Get opcode and regclass of the output for the given store instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
@@ -263,13 +258,6 @@
break;
}
- X86AddressMode AM;
- if (Ptr) {
- // Address is in register.
- AM.Base.Reg = Ptr;
- AM.Disp = Offset;
- } else
- AM.GV = cast<GlobalValue>(V);
addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Val);
return true;
}
@@ -331,6 +319,39 @@
return true;
}
+/// X86SelectAddress - Attempt to fill in an address from the given value.
+///
+bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
+ // Look past bitcasts.
+ if (const BitCastInst *BC = dyn_cast<BitCastInst>(V))
+ return X86SelectAddress(BC->getOperand(0), AM);
+
+ if (const AllocaInst *A = dyn_cast<AllocaInst>(V)) {
+ DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
+ if (SI == StaticAllocaMap.end())
+ return false;
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = SI->second;
+ } else if (unsigned Ptr = lookUpRegForValue(V)) {
+ AM.Base.Reg = Ptr;
+ } else {
+ // Handle constant address.
+ // FIXME: If load type is something we can't handle, this can result in
+ // a dead stub load instruction.
+ if (isa<Constant>(V) && X86SelectConstAddr(V, AM.Base.Reg)) {
+ if (AM.Base.Reg == 0)
+ AM.GV = cast<GlobalValue>(V);
+ } else {
+ AM.Base.Reg = getRegForValue(V);
+ if (AM.Base.Reg == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+ }
+ }
+
+ return true;
+}
+
/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction* I) {
MVT VT;
@@ -341,21 +362,11 @@
// Unhandled operand. Halt "fast" selection and bail.
return false;
- Value *V = I->getOperand(1);
- unsigned Ptr = lookUpRegForValue(V);
- if (!Ptr) {
- // Handle constant load address.
- // FIXME: If load type is something we can't handle, this can result in
- // a dead stub load instruction.
- if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr)) {
- Ptr = getRegForValue(V);
- if (Ptr == 0)
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
- }
- }
+ X86AddressMode AM;
+ if (!X86SelectAddress(I->getOperand(1), AM))
+ return false;
- return X86FastEmitStore(VT, Val, Ptr, 0, V);
+ return X86FastEmitStore(VT, Val, AM);
}
/// X86SelectLoad - Select and emit code to implement load instructions.
@@ -365,22 +376,12 @@
if (!isTypeLegal(I->getType(), TLI, VT))
return false;
- Value *V = I->getOperand(0);
- unsigned Ptr = lookUpRegForValue(V);
- if (!Ptr) {
- // Handle constant load address.
- // FIXME: If load type is something we can't handle, this can result in
- // a dead stub load instruction.
- if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr)) {
- Ptr = getRegForValue(V);
- if (Ptr == 0)
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
- }
- }
+ X86AddressMode AM;
+ if (!X86SelectAddress(I->getOperand(0), AM))
+ return false;
unsigned ResultReg = 0;
- if (X86FastEmitLoad(VT, Ptr, V, ResultReg)) {
+ if (X86FastEmitLoad(VT, AM, ResultReg)) {
UpdateValueMap(I, ResultReg);
return true;
}
@@ -831,7 +832,10 @@
RegArgs.push_back(VA.getLocReg());
} else {
unsigned LocMemOffset = VA.getLocMemOffset();
- X86FastEmitStore(ArgVT, Arg, StackPtr, LocMemOffset, NULL);
+ X86AddressMode AM;
+ AM.Base.Reg = StackPtr;
+ AM.Disp = LocMemOffset;
+ X86FastEmitStore(ArgVT, Arg, AM);
}
}
@@ -885,7 +889,7 @@
MVT ResVT = RVLocs[0].getValVT();
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
- int FI = MFI->CreateStackObject(MemSize, MemSize);
+ int FI = MFI.CreateStackObject(MemSize, MemSize);
addFrameReference(BuildMI(MBB, TII.get(Opc)), FI).addReg(ResultReg);
DstRC = ResVT == MVT::f32
? X86::FR32RegisterClass : X86::FR64RegisterClass;
@@ -938,8 +942,7 @@
return false;
}
-unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
- MachineConstantPool* MCP) {
+unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
// Can't handle PIC-mode yet.
if (TM.getRelocationModel() == Reloc::PIC_)
return 0;
@@ -1010,15 +1013,27 @@
Align = Log2_64(Align);
}
- unsigned MCPOffset = MCP->getConstantPoolIndex(C, Align);
+ unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
addConstantPoolReference(BuildMI(MBB, TII.get(Opc), ResultReg), MCPOffset);
return ResultReg;
}
+unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
+ X86AddressMode AM;
+ if (!X86SelectAddress(C, AM))
+ return 0;
+ unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
+ TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
+ unsigned ResultReg = createResultReg(RC);
+ addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
+ return ResultReg;
+}
+
namespace llvm {
llvm::FastISel *X86::createFastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm) {
- return new X86FastISel(mf, vm, bm);
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
+ DenseMap<const AllocaInst *, int> &am) {
+ return new X86FastISel(mf, vm, bm, am);
}
}
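
The common thread in the X86 changes is X86AddressMode: X86SelectAddress fills one in (register base, frame-index base, or global), and the load/store emitters plus TargetMaterializeAlloca consume it. A condensed sketch of the frame-index path, using only the fields and helpers that appear in the patch (a fragment, not standalone code):

// For a static alloca A with frame index FI = StaticAllocaMap lookup of A:
X86AddressMode AM;
AM.BaseType = X86AddressMode::FrameIndexBase;
AM.Base.FrameIndex = FI;       // resolved to a real stack offset later
AM.Disp = 0;                   // no extra displacement
// TargetMaterializeAlloca then turns this into an LEA of the slot's address,
// roughly "lea reg, [frame slot]" once frame indices are lowered:
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(TLI.getPointerTy()));
addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);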
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=56066&r1=56065&r2=56066&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Sep 10 15:11:02 2008
@@ -1886,8 +1886,10 @@
X86TargetLowering::createFastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &vm,
DenseMap<const BasicBlock *,
- MachineBasicBlock *> &bm) {
- return X86::createFastISel(mf, vm, bm);
+ MachineBasicBlock *> &bm,
+ DenseMap<const AllocaInst *, int> &am) {
+
+ return X86::createFastISel(mf, vm, bm, am);
}
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.h?rev=56066&r1=56065&r2=56066&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.h (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.h Wed Sep 10 15:11:02 2008
@@ -473,7 +473,8 @@
virtual FastISel *
createFastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &);
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &,
+ DenseMap<const AllocaInst *, int> &);
private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
@@ -604,7 +605,8 @@
namespace X86 {
FastISel *createFastISel(MachineFunction &mf,
DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &);
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &,
+ DenseMap<const AllocaInst *, int> &);
}
}