[llvm-commits] CVS: llvm/lib/Target/X86/README-X86-64.txt X86ATTAsmPrinter.cpp X86ATTAsmPrinter.h X86ISelDAGToDAG.cpp X86InstrInfo.td X86InstrX86-64.td X86TargetMachine.cpp
Evan Cheng
evan.cheng at apple.com
Tue Dec 5 11:50:35 PST 2006
Changes in directory llvm/lib/Target/X86:
README-X86-64.txt updated: 1.2 -> 1.3
X86ATTAsmPrinter.cpp updated: 1.77 -> 1.78
X86ATTAsmPrinter.h updated: 1.17 -> 1.18
X86ISelDAGToDAG.cpp updated: 1.134 -> 1.135
X86InstrInfo.td updated: 1.296 -> 1.297
X86InstrX86-64.td updated: 1.10 -> 1.11
X86TargetMachine.cpp updated: 1.129 -> 1.130
---
Log message:
- Switch X86-64 JIT to large code size model.
- Re-enable some codegen niceties for X86-64 static relocation model codegen.
- Clean ups, etc.
---
Diffs of the changes: (+61 -82)
README-X86-64.txt | 35 ---------------------------------
X86ATTAsmPrinter.cpp | 17 ++++++++++------
X86ATTAsmPrinter.h | 2 -
X86ISelDAGToDAG.cpp | 53 +++++++++++++++++++++++++--------------------------
X86InstrInfo.td | 19 +++++++++---------
X86InstrX86-64.td | 14 +++++++++----
X86TargetMachine.cpp | 3 ++
7 files changed, 61 insertions(+), 82 deletions(-)
Index: llvm/lib/Target/X86/README-X86-64.txt
diff -u llvm/lib/Target/X86/README-X86-64.txt:1.2 llvm/lib/Target/X86/README-X86-64.txt:1.3
--- llvm/lib/Target/X86/README-X86-64.txt:1.2 Mon Dec 4 21:58:23 2006
+++ llvm/lib/Target/X86/README-X86-64.txt Tue Dec 5 13:50:18 2006
@@ -229,41 +229,6 @@
//===---------------------------------------------------------------------===//
-For this:
-
-extern int dst[];
-extern int* ptr;
-
-void test(void) {
- ptr = dst;
-}
-
-We generate this code for static relocation model:
-
-_test:
- leaq _dst(%rip), %rax
- movq %rax, _ptr(%rip)
- ret
-
-If we are in small code model, then we can treat _dst as a 32-bit constant.
- movq $_dst, _ptr(%rip)
-
-Note, however, we should continue to use RIP relative addressing mode as much as
-possible. The above is actually one byte shorter than
- movq $_dst, _ptr
-
-A better example is the code from PR1018. We are generating:
- leaq xcalloc2(%rip), %rax
- movq %rax, 8(%rsp)
-when we should be generating:
- movq $xcalloc2, 8(%rsp)
-
-The reason the better codegen isn't done now is to support the static small
-code model in JIT mode. The JIT cannot ensure that all GVs are placed in the
-lower 4G, so we are not treating GV labels as 32-bit values.
-
-//===---------------------------------------------------------------------===//
-
Right now the asm printer assumes GlobalAddress are accessed via RIP relative
addressing. Therefore, it is not possible to generate this:
movabsq $__ZTV10polynomialIdE+16, %rax
Index: llvm/lib/Target/X86/X86ATTAsmPrinter.cpp
diff -u llvm/lib/Target/X86/X86ATTAsmPrinter.cpp:1.77 llvm/lib/Target/X86/X86ATTAsmPrinter.cpp:1.78
--- llvm/lib/Target/X86/X86ATTAsmPrinter.cpp:1.77 Tue Dec 5 00:43:58 2006
+++ llvm/lib/Target/X86/X86ATTAsmPrinter.cpp Tue Dec 5 13:50:18 2006
@@ -156,7 +156,7 @@
}
void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
- const char *Modifier) {
+ const char *Modifier, bool NotRIPRel) {
const MachineOperand &MO = MI->getOperand(OpNo);
const MRegisterInfo &RI = *TM.getRegisterInfo();
switch (MO.getType()) {
@@ -192,7 +192,7 @@
if (X86PICStyle == PICStyle::Stub &&
TM.getRelocationModel() == Reloc::PIC_)
O << "-\"L" << getFunctionNumber() << "$pb\"";
- if (isMemOp && Subtarget->is64Bit())
+ if (isMemOp && Subtarget->is64Bit() && !NotRIPRel)
O << "(%rip)";
return;
}
@@ -210,7 +210,7 @@
else if (Offset < 0)
O << Offset;
- if (isMemOp && Subtarget->is64Bit())
+ if (isMemOp && Subtarget->is64Bit() && !NotRIPRel)
O << "(%rip)";
return;
}
@@ -267,8 +267,12 @@
if (isMemOp && Subtarget->is64Bit()) {
if (isExt && TM.getRelocationModel() != Reloc::Static)
- O << "@GOTPCREL";
- O << "(%rip)";
+ O << "@GOTPCREL(%rip)";
+ else if (!NotRIPRel)
+ // Use rip when possible to reduce code size, except when index or
+ // base register are also part of the address. e.g.
+ // foo(%rip)(%rcx,%rax,4) is not legal
+ O << "(%rip)";
}
return;
@@ -329,10 +333,11 @@
return;
}
+ bool NotRIPRel = IndexReg.getReg() || BaseReg.getReg();
if (DispSpec.isGlobalAddress() ||
DispSpec.isConstantPoolIndex() ||
DispSpec.isJumpTableIndex()) {
- printOperand(MI, Op+3, "mem");
+ printOperand(MI, Op+3, "mem", NotRIPRel);
} else {
int DispVal = DispSpec.getImmedValue();
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))
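For context, here is a minimal standalone sketch of the decision the asm
printer hunk above makes; the helper name and its boolean parameters are
illustrative only and are not part of the LLVM sources:

  // Sketch: emit the "(%rip)" suffix only for 64-bit memory operands whose
  // address has no base or index register, since an address such as
  // foo(%rip)(%rcx,%rax,4) is not encodable.
  static bool useRIPRelative(bool IsMemOp, bool Is64Bit,
                             unsigned BaseReg, unsigned IndexReg) {
    bool NotRIPRel = BaseReg != 0 || IndexReg != 0;
    return IsMemOp && Is64Bit && !NotRIPRel;
  }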
Index: llvm/lib/Target/X86/X86ATTAsmPrinter.h
diff -u llvm/lib/Target/X86/X86ATTAsmPrinter.h:1.17 llvm/lib/Target/X86/X86ATTAsmPrinter.h:1.18
--- llvm/lib/Target/X86/X86ATTAsmPrinter.h:1.17 Wed Oct 4 21:43:52 2006
+++ llvm/lib/Target/X86/X86ATTAsmPrinter.h Tue Dec 5 13:50:18 2006
@@ -35,7 +35,7 @@
// These methods are used by the tablegen'erated instruction printer.
void printOperand(const MachineInstr *MI, unsigned OpNo,
- const char *Modifier = 0);
+ const char *Modifier = 0, bool NotRIPRel = false);
void printi8mem(const MachineInstr *MI, unsigned OpNo) {
printMemReference(MI, OpNo);
}
Index: llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
diff -u llvm/lib/Target/X86/X86ISelDAGToDAG.cpp:1.134 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp:1.135
--- llvm/lib/Target/X86/X86ISelDAGToDAG.cpp:1.134 Mon Dec 4 22:01:03 2006
+++ llvm/lib/Target/X86/X86ISelDAGToDAG.cpp Tue Dec 5 13:50:18 2006
@@ -595,44 +595,43 @@
// Under X86-64 non-small code model, GV (and friends) are 64-bits.
if (is64Bit && TM.getCodeModel() != CodeModel::Small)
break;
-
+ if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
+ break;
// If value is available in a register both base and index components have
// been picked, we can't fit the result available in the register in the
// addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
if (!Available || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
- // For X86-64 PIC code, only allow GV / CP + displacement so we can use
- // RIP relative addressing mode.
- if (is64Bit &&
- (AM.Base.Reg.Val || AM.Scale > 1 || AM.IndexReg.Val ||
- AM.BaseType == X86ISelAddressMode::FrameIndexBase))
- break;
- if (ConstantPoolSDNode *CP =
- dyn_cast<ConstantPoolSDNode>(N.getOperand(0))) {
- if (AM.CP == 0) {
+ bool isStatic = TM.getRelocationModel() == Reloc::Static;
+ SDOperand N0 = N.getOperand(0);
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+ GlobalValue *GV = G->getGlobal();
+ bool isAbs32 = !is64Bit ||
+ (isStatic && !(GV->isExternal() || GV->hasWeakLinkage() ||
+ GV->hasLinkOnceLinkage()));
+ if (isAbs32 || isRoot) {
+ AM.GV = G->getGlobal();
+ AM.Disp += G->getOffset();
+ AM.isRIPRel = !isAbs32;
+ return false;
+ }
+ } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+ if (!is64Bit || isStatic || isRoot) {
AM.CP = CP->getConstVal();
AM.Align = CP->getAlignment();
AM.Disp += CP->getOffset();
- AM.isRIPRel = is64Bit;
- return false;
- }
- } else if (GlobalAddressSDNode *G =
- dyn_cast<GlobalAddressSDNode>(N.getOperand(0))) {
- if (AM.GV == 0) {
- AM.GV = G->getGlobal();
- AM.Disp += G->getOffset();
- AM.isRIPRel = is64Bit;
+ AM.isRIPRel = !isStatic;
return false;
}
- } else if (isRoot && is64Bit) {
- if (ExternalSymbolSDNode *S =
- dyn_cast<ExternalSymbolSDNode>(N.getOperand(0))) {
+ } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+ if (isStatic || isRoot) {
AM.ES = S->getSymbol();
- AM.isRIPRel = true;
+ AM.isRIPRel = !isStatic;
return false;
- } else if (JumpTableSDNode *J =
- dyn_cast<JumpTableSDNode>(N.getOperand(0))) {
+ }
+ } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
+ if (isStatic || isRoot) {
AM.JT = J->getIndex();
- AM.isRIPRel = true;
+ AM.isRIPRel = !isStatic;
return false;
}
}
@@ -908,7 +907,7 @@
if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
// For X86-64, we should always use lea to materialize RIP relative
// addresses.
- if (Subtarget->is64Bit())
+ if (Subtarget->is64Bit() && TM.getRelocationModel() != Reloc::Static)
Complexity = 4;
else
Complexity += 2;
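As a rough restatement of the new selection rule (a sketch with made-up
names, not the actual LLVM interfaces): a global may be folded as a 32-bit
absolute displacement when not in 64-bit mode, or when the relocation model
is static and the global is known to be defined locally; otherwise it must
stay RIP-relative.

  // Illustrative helper mirroring the isAbs32 condition in the hunk above.
  static bool canUseAbs32(bool Is64Bit, bool IsStaticReloc,
                          bool IsExternal, bool IsWeak, bool IsLinkOnce) {
    // On 32-bit targets every address is a 32-bit absolute; on x86-64 only
    // locally defined globals under the static relocation model qualify.
    return !Is64Bit || (IsStaticReloc && !(IsExternal || IsWeak || IsLinkOnce));
  }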
Index: llvm/lib/Target/X86/X86InstrInfo.td
diff -u llvm/lib/Target/X86/X86InstrInfo.td:1.296 llvm/lib/Target/X86/X86InstrInfo.td:1.297
--- llvm/lib/Target/X86/X86InstrInfo.td:1.296 Thu Nov 30 15:55:46 2006
+++ llvm/lib/Target/X86/X86InstrInfo.td Tue Dec 5 13:50:18 2006
@@ -163,15 +163,16 @@
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
-def HasMMX : Predicate<"Subtarget->hasMMX()">;
-def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
-def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
-def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
-def FPStack : Predicate<"!Subtarget->hasSSE2()">;
-def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
-def In64BitMode : Predicate<"Subtarget->is64Bit()">;
-def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
-def NotSmallCode :Predicate<"TM.getCodeModel() != CodeModel::Small">;
+def HasMMX : Predicate<"Subtarget->hasMMX()">;
+def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
+def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
+def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
+def FPStack : Predicate<"!Subtarget->hasSSE2()">;
+def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
+def In64BitMode : Predicate<"Subtarget->is64Bit()">;
+def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
+def NotSmallCode : Predicate<"TM.getCodeModel() != CodeModel::Small">;
+def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
//===----------------------------------------------------------------------===//
// X86 specific pattern fragments.
Index: llvm/lib/Target/X86/X86InstrX86-64.td
diff -u llvm/lib/Target/X86/X86InstrX86-64.td:1.10 llvm/lib/Target/X86/X86InstrX86-64.td:1.11
--- llvm/lib/Target/X86/X86InstrX86-64.td:1.10 Mon Dec 4 22:01:03 2006
+++ llvm/lib/Target/X86/X86InstrX86-64.td Tue Dec 5 13:50:18 2006
@@ -1031,12 +1031,18 @@
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
(MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
-/*
+def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tconstpool:$src)>,
+ Requires<[SmallCode, IsStatic]>;
+def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
+ (MOV64mi32 addr:$dst, tjumptable:$src)>,
+ Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
- (MOV64mi32 addr:$dst, tglobaladdr:$src)>, Requires<[SmallCode]>;
+ (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
+ Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
- (MOV64mi32 addr:$dst, texternalsym:$src)>, Requires<[SmallCode]>;
-*/
+ (MOV64mi32 addr:$dst, texternalsym:$src)>,
+ Requires<[SmallCode, IsStatic]>;
// Calls
// Direct PC relative function call for small code model. 32-bit displacement
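The new MOV64mi32 patterns are gated on SmallCode plus IsStatic because the
instruction stores a sign-extended 32-bit immediate, so the symbol's absolute
address must be guaranteed to fit in a signed 32-bit value at static link
time. A minimal sketch of that constraint (plain C++, not code from the
patch):

  #include <cstdint>

  // True if Addr survives a round trip through a sign-extended 32-bit
  // immediate, i.e. it is representable by MOV64mi32's imm32 field.
  static bool fitsInSExt32(uint64_t Addr) {
    return static_cast<int64_t>(static_cast<int32_t>(Addr)) ==
           static_cast<int64_t>(Addr);
  }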
Index: llvm/lib/Target/X86/X86TargetMachine.cpp
diff -u llvm/lib/Target/X86/X86TargetMachine.cpp:1.129 llvm/lib/Target/X86/X86TargetMachine.cpp:1.130
--- llvm/lib/Target/X86/X86TargetMachine.cpp:1.129 Mon Dec 4 12:07:10 2006
+++ llvm/lib/Target/X86/X86TargetMachine.cpp Tue Dec 5 13:50:18 2006
@@ -158,6 +158,9 @@
MachineCodeEmitter &MCE) {
// FIXME: Move this to TargetJITInfo!
setRelocationModel(Reloc::Static);
+ // JIT cannot ensure globals are placed in the lower 4G of address.
+ if (Subtarget.is64Bit())
+ setCodeModel(CodeModel::Large);
PM.add(createX86CodeEmitterPass(*this, MCE));
return false;
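The small code model assumes code and data are reachable through 32-bit
displacements and immediates, but the JIT may place globals anywhere in the
64-bit address space, hence the switch to the large code model above. A
hedged sketch of the reachability assumption (illustrative helper, not JIT
code from this patch):

  #include <cstdint>

  // True if Target is reachable from the end of the current instruction via
  // a signed 32-bit (rel32) displacement, which is the premise the small
  // code model relies on and the JIT cannot guarantee.
  static bool reachableWithDisp32(uint64_t Target, uint64_t NextInstrAddr) {
    int64_t Delta = static_cast<int64_t>(Target - NextInstrAddr);
    return Delta >= INT32_MIN && Delta <= INT32_MAX;
  }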