[llvm-commits] [llvm] r55854 - /llvm/trunk/lib/Target/X86/X86FastISel.cpp
Evan Cheng
evan.cheng at apple.com
Fri Sep 5 14:00:03 PDT 2008
Author: evancheng
Date: Fri Sep 5 16:00:03 2008
New Revision: 55854
URL: http://llvm.org/viewvc/llvm-project?rev=55854&view=rev
Log:
Factor out code that emits load and store instructions.
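In rough outline, the patch below pulls the per-type opcode/register-class switch out of X86SelectLoad and X86SelectStore and into two shared helpers, X86FastEmitLoad and X86FastEmitStore, which the select routines then call. The following is a self-contained sketch of that pattern only; it uses simplified stand-in types and opcode strings, not the real LLVM/X86 API, so it compiles on its own.

// Illustrative only: the real helpers operate on MVT, register numbers and
// X86AddressMode; here an enum and strings stand in so the sketch builds
// as a standalone program.
#include <cstdio>
#include <string>

enum SimpleVT { i8, i16, i32, i64, Other };

// Factored-out helper: map a value type to a load opcode, returning false
// for types fast-isel should not handle (mirrors the switch that moves
// into X86FastEmitLoad in this patch).
static bool fastEmitLoad(SimpleVT VT, std::string &Opc) {
  switch (VT) {
  default:  return false;
  case i8:  Opc = "MOV8rm";  return true;
  case i16: Opc = "MOV16rm"; return true;
  case i32: Opc = "MOV32rm"; return true;
  case i64: Opc = "MOV64rm"; return true;  // x86-64 only
  }
}

// The load selector now just validates its input and delegates emission.
static bool selectLoad(SimpleVT VT) {
  std::string Opc;
  if (!fastEmitLoad(VT, Opc))
    return false;  // unhandled type: bail out of fast selection
  std::printf("emit %s\n", Opc.c_str());
  return true;
}

int main() {
  selectLoad(i32);   // emits MOV32rm
  selectLoad(Other); // bails, leaving the normal selector to handle it
}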
Modified:
llvm/trunk/lib/Target/X86/X86FastISel.cpp
Modified: llvm/trunk/lib/Target/X86/X86FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FastISel.cpp?rev=55854&r1=55853&r2=55854&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp Fri Sep 5 16:00:03 2008
@@ -45,6 +45,10 @@
#include "X86GenFastISel.inc"
private:
+ bool X86FastEmitLoad(MVT VT, unsigned Op0, Value *V, unsigned &RR);
+
+ bool X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V);
+
bool X86SelectConstAddr(Value *V, unsigned &Op0);
bool X86SelectLoad(Instruction *I);
@@ -60,69 +64,77 @@
bool X86SelectShift(Instruction *I);
bool X86SelectSelect(Instruction *I);
-
+
unsigned TargetMaterializeConstant(Constant *C, MachineConstantPool* MCP);
};
-/// X86SelectConstAddr - Select and emit code to materialize constant address.
-///
-bool X86FastISel::X86SelectConstAddr(Value *V,
- unsigned &Op0) {
- // FIXME: Only GlobalAddress for now.
- GlobalValue *GV = dyn_cast<GlobalValue>(V);
- if (!GV)
- return false;
-
- if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) {
- // Issue load from stub if necessary.
- unsigned Opc = 0;
- const TargetRegisterClass *RC = NULL;
- if (TLI.getPointerTy() == MVT::i32) {
- Opc = X86::MOV32rm;
- RC = X86::GR32RegisterClass;
+/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
+/// The address is either pre-computed, i.e. Op0, or a GlobalAddress, i.e. V.
+/// Return true and the result register by reference if it is possible.
+bool X86FastISel::X86FastEmitLoad(MVT VT, unsigned Op0, Value *V,
+ unsigned &ResultReg) {
+ // Get opcode and regclass of the output for the given load instruction.
+ unsigned Opc = 0;
+ const TargetRegisterClass *RC = NULL;
+ switch (VT.getSimpleVT()) {
+ default: return false;
+ case MVT::i8:
+ Opc = X86::MOV8rm;
+ RC = X86::GR8RegisterClass;
+ break;
+ case MVT::i16:
+ Opc = X86::MOV16rm;
+ RC = X86::GR16RegisterClass;
+ break;
+ case MVT::i32:
+ Opc = X86::MOV32rm;
+ RC = X86::GR32RegisterClass;
+ break;
+ case MVT::i64:
+ // Must be in x86-64 mode.
+ Opc = X86::MOV64rm;
+ RC = X86::GR64RegisterClass;
+ break;
+ case MVT::f32:
+ if (Subtarget->hasSSE1()) {
+ Opc = X86::MOVSSrm;
+ RC = X86::FR32RegisterClass;
} else {
- Opc = X86::MOV64rm;
- RC = X86::GR64RegisterClass;
+ Opc = X86::LD_Fp32m;
+ RC = X86::RFP32RegisterClass;
}
- Op0 = createResultReg(RC);
- X86AddressMode AM;
- AM.GV = GV;
- addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
- // Prevent loading GV stub multiple times in same MBB.
- LocalValueMap[V] = Op0;
+ break;
+ case MVT::f64:
+ if (Subtarget->hasSSE2()) {
+ Opc = X86::MOVSDrm;
+ RC = X86::FR64RegisterClass;
+ } else {
+ Opc = X86::LD_Fp64m;
+ RC = X86::RFP64RegisterClass;
+ }
+ break;
+ case MVT::f80:
+ Opc = X86::LD_Fp80m;
+ RC = X86::RFP80RegisterClass;
+ break;
}
+
+ ResultReg = createResultReg(RC);
+ X86AddressMode AM;
+ if (Op0)
+ // Address is in register.
+ AM.Base.Reg = Op0;
+ else
+ AM.GV = cast<GlobalValue>(V);
+ addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
return true;
}
-/// X86SelectStore - Select and emit code to implement store instructions.
-bool X86FastISel::X86SelectStore(Instruction* I) {
- MVT VT = MVT::getMVT(I->getOperand(0)->getType());
- if (VT == MVT::Other || !VT.isSimple())
- // Unhandled type. Halt "fast" selection and bail.
- return false;
- if (VT == MVT::iPTR)
- // Use pointer type.
- VT = TLI.getPointerTy();
- // We only handle legal types. For example, on x86-32 the instruction
- // selector contains all of the 64-bit instructions from x86-64,
- // under the assumption that i64 won't be used if the target doesn't
- // support it.
- if (!TLI.isTypeLegal(VT))
- return false;
- unsigned Op0 = getRegForValue(I->getOperand(0));
- if (Op0 == 0)
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
-
- Value *V = I->getOperand(1);
- unsigned Op1 = getRegForValue(V);
- if (Op1 == 0) {
- // Handle constant load address.
- if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op1))
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
- }
-
+/// X86FastEmitStore - Emit a machine instruction to store a value Op0 of
+/// type VT. The address is either pre-computed, i.e. Op1, or a GlobalAddress,
+/// i.e. V. Return true if it is possible.
+bool
+X86FastISel::X86FastEmitStore(MVT VT, unsigned Op0, unsigned Op1, Value *V) {
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
@@ -179,6 +191,68 @@
return true;
}
+/// X86SelectConstAddr - Select and emit code to materialize constant address.
+///
+bool X86FastISel::X86SelectConstAddr(Value *V,
+ unsigned &Op0) {
+ // FIXME: Only GlobalAddress for now.
+ GlobalValue *GV = dyn_cast<GlobalValue>(V);
+ if (!GV)
+ return false;
+
+ if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) {
+ // Issue load from stub if necessary.
+ unsigned Opc = 0;
+ const TargetRegisterClass *RC = NULL;
+ if (TLI.getPointerTy() == MVT::i32) {
+ Opc = X86::MOV32rm;
+ RC = X86::GR32RegisterClass;
+ } else {
+ Opc = X86::MOV64rm;
+ RC = X86::GR64RegisterClass;
+ }
+ Op0 = createResultReg(RC);
+ X86AddressMode AM;
+ AM.GV = GV;
+ addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
+ // Prevent loading GV stub multiple times in same MBB.
+ LocalValueMap[V] = Op0;
+ }
+ return true;
+}
+
+/// X86SelectStore - Select and emit code to implement store instructions.
+bool X86FastISel::X86SelectStore(Instruction* I) {
+ MVT VT = MVT::getMVT(I->getOperand(0)->getType());
+ if (VT == MVT::Other || !VT.isSimple())
+ // Unhandled type. Halt "fast" selection and bail.
+ return false;
+ if (VT == MVT::iPTR)
+ // Use pointer type.
+ VT = TLI.getPointerTy();
+ // We only handle legal types. For example, on x86-32 the instruction
+ // selector contains all of the 64-bit instructions from x86-64,
+ // under the assumption that i64 won't be used if the target doesn't
+ // support it.
+ if (!TLI.isTypeLegal(VT))
+ return false;
+ unsigned Op0 = getRegForValue(I->getOperand(0));
+ if (Op0 == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+
+ Value *V = I->getOperand(1);
+ unsigned Op1 = getRegForValue(V);
+ if (Op1 == 0) {
+ // Handle constant load address.
+ if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op1))
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+ }
+
+ return X86FastEmitStore(VT, Op0, Op1, V);
+}
+
/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
@@ -200,67 +274,19 @@
unsigned Op0 = getRegForValue(V);
if (Op0 == 0) {
// Handle constant load address.
+ // FIXME: If load type is something we can't handle, this can result in
+ // a dead stub load instruction.
if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op0))
// Unhandled operand. Halt "fast" selection and bail.
return false;
}
- // Get opcode and regclass of the output for the given load instruction.
- unsigned Opc = 0;
- const TargetRegisterClass *RC = NULL;
- switch (VT.getSimpleVT()) {
- default: return false;
- case MVT::i8:
- Opc = X86::MOV8rm;
- RC = X86::GR8RegisterClass;
- break;
- case MVT::i16:
- Opc = X86::MOV16rm;
- RC = X86::GR16RegisterClass;
- break;
- case MVT::i32:
- Opc = X86::MOV32rm;
- RC = X86::GR32RegisterClass;
- break;
- case MVT::i64:
- // Must be in x86-64 mode.
- Opc = X86::MOV64rm;
- RC = X86::GR64RegisterClass;
- break;
- case MVT::f32:
- if (Subtarget->hasSSE1()) {
- Opc = X86::MOVSSrm;
- RC = X86::FR32RegisterClass;
- } else {
- Opc = X86::LD_Fp32m;
- RC = X86::RFP32RegisterClass;
- }
- break;
- case MVT::f64:
- if (Subtarget->hasSSE2()) {
- Opc = X86::MOVSDrm;
- RC = X86::FR64RegisterClass;
- } else {
- Opc = X86::LD_Fp64m;
- RC = X86::RFP64RegisterClass;
- }
- break;
- case MVT::f80:
- Opc = X86::LD_Fp80m;
- RC = X86::RFP80RegisterClass;
- break;
+ unsigned ResultReg = 0;
+ if (X86FastEmitLoad(VT, Op0, V, ResultReg)) {
+ UpdateValueMap(I, ResultReg);
+ return true;
}
-
- unsigned ResultReg = createResultReg(RC);
- X86AddressMode AM;
- if (Op0)
- // Address is in register.
- AM.Base.Reg = Op0;
- else
- AM.GV = cast<GlobalValue>(V);
- addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
- UpdateValueMap(I, ResultReg);
- return true;
+ return false;
}
bool X86FastISel::X86SelectCmp(Instruction *I) {
@@ -621,10 +647,11 @@
unsigned ResultReg = createResultReg(RC);
if (isa<GlobalValue>(C)) {
+ // FIXME: If store value type is something we can't handle, this can result
+ // in a dead stub load instruction.
if (X86SelectConstAddr(C, ResultReg))
return ResultReg;
- else
- return 0;
+ return 0;
}
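After this change, X86SelectStore reduces to resolving its two operands (the stored value and the address) and handing them to X86FastEmitStore, and X86SelectLoad follows the same shape with X86FastEmitLoad. Below is a minimal stand-alone sketch of that select-then-delegate flow, again using simplified stand-ins rather than the real FastISel types; the names here are illustrative only.

#include <cstdio>

// Stand-in for getRegForValue: 0 means the operand could not be
// materialized, anything else is treated as a valid virtual register.
static unsigned getRegFor(int Value) { return Value >= 0 ? 1u + Value : 0u; }

// Stand-in for X86FastEmitStore: store register ValReg through AddrReg.
static bool fastEmitStore(unsigned ValReg, unsigned AddrReg) {
  if (!ValReg || !AddrReg)
    return false;
  std::printf("emit store val=r%u addr=r%u\n", ValReg, AddrReg);
  return true;
}

// Shape of X86SelectStore after the patch: look up both operands, then
// delegate all instruction emission to the factored-out helper.  Note the
// FIXME added in the patch: if the helper later rejects the type after a
// constant address was already materialized via X86SelectConstAddr, the
// stub load it emitted is left behind as dead code.
static bool selectStore(int StoredValue, int Address) {
  unsigned ValReg  = getRegFor(StoredValue);
  unsigned AddrReg = getRegFor(Address);
  if (!ValReg || !AddrReg)
    return false;  // unhandled operand: halt "fast" selection and bail
  return fastEmitStore(ValReg, AddrReg);
}

int main() {
  selectStore(7, 3);   // emits the store
  selectStore(-1, 3);  // bails: operand not handled
}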