[llvm-commits] CVS: llvm/lib/Target/X86/X86ISelLowering.cpp X86ISelLowering.h X86JITInfo.cpp
Evan Cheng
evan.cheng at apple.com
Sat Jun 24 01:36:24 PDT 2006
Changes in directory llvm/lib/Target/X86:
X86ISelLowering.cpp updated: 1.230 -> 1.231
X86ISelLowering.h updated: 1.66 -> 1.67
X86JITInfo.cpp updated: 1.20 -> 1.21
---
Log message:
Simplify X86CompilationCallback: always align the stack to a 16-byte boundary; save EAX/EDX only when fastcc passes arguments in them.
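For context, the alignment the new prologue performs is a single mask: anding ESP with -16 clears the low four bits, rounding the stack pointer down to the next 16-byte boundary, and stashing the old value in EBP makes the restore a single move. A minimal standalone sketch of that arithmetic (illustrative names, not code from this patch):

#include <cassert>
#include <cstdint>

// Round a stack pointer down to a 16-byte boundary, mirroring what
// "andl $-16, %esp" does: -16 is ~15, so the low four bits are
// cleared and the result is always <= the input.
static std::uintptr_t alignDown16(std::uintptr_t SP) {
  return SP & ~std::uintptr_t(15);
}

int main() {
  assert(alignDown16(0x1004) == 0x1000); // misaligned: rounded down
  assert(alignDown16(0x1000) == 0x1000); // already aligned: unchanged
  return 0;
}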
---
Diffs of the changes: (+44 -34)
X86ISelLowering.cpp | 32 ++++++++++++--------------------
X86ISelLowering.h | 16 ++++++++++++++++
X86JITInfo.cpp | 30 ++++++++++++++++--------------
3 files changed, 44 insertions(+), 34 deletions(-)
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
diff -u llvm/lib/Target/X86/X86ISelLowering.cpp:1.230 llvm/lib/Target/X86/X86ISelLowering.cpp:1.231
--- llvm/lib/Target/X86/X86ISelLowering.cpp:1.230 Thu Jun 15 03:14:54 2006
+++ llvm/lib/Target/X86/X86ISelLowering.cpp Sat Jun 24 03:36:10 2006
@@ -762,26 +762,6 @@
// (when we have a global fp allocator) and do other tricks.
//
-// FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
-// to pass in registers. 0 is none, 1 is "use EAX", 2 is "use EAX and
-// EDX". Anything more is illegal.
-//
-// FIXME: The linscan register allocator currently has problems with
-// coalescing. At the time of this writing, whenever it decides to coalesce
-// a physreg with a virtreg, this increases the size of the physreg's live
-// range, and the live range cannot ever be reduced. This causes problems if
-// too many physregs are coalesced with virtregs, which can cause the register
-// allocator to wedge itself.
-//
-// This code triggers this problem more often if we pass args in registers,
-// so disable it until this is fixed.
-//
-// NOTE: this isn't marked const, so that GCC doesn't emit annoying warnings
-// about code being dead.
-//
-static unsigned FASTCC_NUM_INT_ARGS_INREGS = 0;
-
-
/// HowToPassFastCCArgument - Returns how a formal argument of the specified
/// type should be passed. If it is through the stack, returns the size of the
/// stack slot; if it is through an integer or XMM register, returns the number of
@@ -798,30 +778,38 @@
switch (ObjectVT) {
default: assert(0 && "Unhandled argument type!");
case MVT::i8:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
ObjIntRegs = 1;
else
+#endif
ObjSize = 1;
break;
case MVT::i16:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
ObjIntRegs = 1;
else
+#endif
ObjSize = 2;
break;
case MVT::i32:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS)
ObjIntRegs = 1;
else
+#endif
ObjSize = 4;
break;
case MVT::i64:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
if (NumIntRegs+2 <= FASTCC_NUM_INT_ARGS_INREGS) {
ObjIntRegs = 2;
} else if (NumIntRegs+1 <= FASTCC_NUM_INT_ARGS_INREGS) {
ObjIntRegs = 1;
ObjSize = 4;
} else
+#endif
ObjSize = 8;
case MVT::f32:
ObjSize = 4;
@@ -1027,10 +1015,12 @@
case MVT::i8:
case MVT::i16:
case MVT::i32:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
++NumIntRegs;
break;
}
+#endif
// Fall through
case MVT::f32:
NumBytes += 4;
@@ -1076,6 +1066,7 @@
case MVT::i8:
case MVT::i16:
case MVT::i32:
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
if (NumIntRegs < FASTCC_NUM_INT_ARGS_INREGS) {
RegsToPass.push_back(
std::make_pair(GPRArgRegs[Arg.getValueType()-MVT::i8][NumIntRegs],
@@ -1083,6 +1074,7 @@
++NumIntRegs;
break;
}
+#endif
// Fall through
case MVT::f32: {
SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
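A note on the guard pattern in the hunks above: the dangling "else" is deliberately kept inside the #if region, so that when FASTCC_NUM_INT_ARGS_INREGS is 0 the preprocessor strips the entire register path and only the stack-slot fallback survives. A minimal standalone sketch of the pattern, with hypothetical names:

#define NUM_INT_ARG_REGS 0  // hypothetical stand-in for FASTCC_NUM_INT_ARGS_INREGS

static unsigned classifyArg(unsigned NumIntRegs) {
  unsigned StackBytes = 0;
#if NUM_INT_ARG_REGS > 0
  if (NumIntRegs < NUM_INT_ARG_REGS)
    return 0;        // argument travels in a register, no stack slot
  else
#endif
    StackBytes = 4;  // with the macro at 0, only this assignment remains
  return StackBytes;
}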
Index: llvm/lib/Target/X86/X86ISelLowering.h
diff -u llvm/lib/Target/X86/X86ISelLowering.h:1.66 llvm/lib/Target/X86/X86ISelLowering.h:1.67
--- llvm/lib/Target/X86/X86ISelLowering.h:1.66 Wed May 24 19:59:30 2006
+++ llvm/lib/Target/X86/X86ISelLowering.h Sat Jun 24 03:36:10 2006
@@ -370,4 +370,20 @@
};
}
+// FASTCC_NUM_INT_ARGS_INREGS - This is the max number of integer arguments
+// to pass in registers. 0 is none, 1 is "use EAX", 2 is "use EAX and
+// EDX". Anything more is illegal.
+//
+// FIXME: The linscan register allocator currently has problems with
+// coalescing. At the time of this writing, whenever it decides to coalesce
+// a physreg with a virtreg, this increases the size of the physreg's live
+// range, and the live range cannot ever be reduced. This causes problems if
+// too many physregs are coalesced with virtregs, which can cause the register
+// allocator to wedge itself.
+//
+// This code triggers this problem more often if we pass args in registers,
+// so disable it until this is fixed.
+//
+#define FASTCC_NUM_INT_ARGS_INREGS 0
+
#endif // X86ISELLOWERING_H
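Moving the constant into the header as a #define also lets X86JITInfo.cpp test it with the preprocessor, which a runtime variable cannot satisfy, and it makes the old NOTE about GCC dead-code warnings moot, since the disabled branch never reaches the compiler at all. One caveat: in a #if expression an undefined identifier evaluates to 0, so a file that fails to include this header would silently take the no-register path. A small illustration of that pitfall (hypothetical declaration):

// Hypothetical translation unit that forgot to include X86ISelLowering.h:
#if FASTCC_NUM_INT_ARGS_INREGS > 0  // undefined here, so it evaluates to 0
void saveFastCCArgRegs();           // ...and this is compiled out, usually
#endif                              // with no diagnostic (-Wundef would catch it)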
Index: llvm/lib/Target/X86/X86JITInfo.cpp
diff -u llvm/lib/Target/X86/X86JITInfo.cpp:1.20 llvm/lib/Target/X86/X86JITInfo.cpp:1.21
--- llvm/lib/Target/X86/X86JITInfo.cpp:1.20 Thu Jun 1 12:13:10 2006
+++ llvm/lib/Target/X86/X86JITInfo.cpp Sat Jun 24 03:36:10 2006
@@ -57,26 +57,28 @@
#endif
"pushl %ebp\n"
"movl %esp, %ebp\n" // Standard prologue
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
"pushl %eax\n"
- "pushl %edx\n" // save EAX/EDX
-#if defined(__CYGWIN__) || defined(__MINGW32__)
- "call _X86CompilationCallback2\n"
-#elif defined(__APPLE__)
- "movl 4(%ebp), %eax\n" // load the address of return address
- "movl $24, %edx\n" // if the opcode of the instruction at the
- "cmpb $-51, (%eax)\n" // return address is our 0xCD marker, then
- "movl $12, %eax\n" // subtract 24 from %esp to realign it to 16
- "cmovne %eax, %edx\n" // bytes after the push of edx, the amount to.
- "subl %edx, %esp\n" // the push of edx to keep it aligned.
- "pushl %edx\n" // subtract. Otherwise, subtract 12 bytes after
+ "pushl %edx\n" // Save EAX/EDX
+#endif
+#if defined(__APPLE__)
+ "andl $-16, %esp\n" // Align ESP on 16-byte boundary
+#endif
+#if defined(__CYGWIN__) || defined(__MINGW32__) || defined(__APPLE__)
"call _X86CompilationCallback2\n"
- "popl %edx\n"
- "addl %edx, %esp\n"
#else
- "call X86CompilationCallback2\n"
+ "call X86CompilationCallback2\n"
+#endif
+#if defined(__APPLE__)
+ "movl %ebp, %esp\n" // Restore ESP
+#endif
+#if FASTCC_NUM_INT_ARGS_INREGS > 0
+#if defined(__APPLE__)
+ "subl $8, %esp\n"
#endif
"popl %edx\n"
"popl %eax\n"
+#endif
"popl %ebp\n"
"ret\n");
#else
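Putting the pieces together: with FASTCC_NUM_INT_ARGS_INREGS currently 0, the Darwin callback reduces after preprocessing to the short sequence below (reconstructed from the hunk above for readability, with the surrounding symbol directives omitted). If the macro is later raised above 0, the EAX/EDX pushes come back, and the Darwin path needs the extra "subl $8, %esp" after restoring ESP to point it back at the saved registers before popping them.

asm(
  "pushl %ebp\n"
  "movl %esp, %ebp\n"               // Standard prologue; EBP anchors the old ESP
  "andl $-16, %esp\n"               // Align ESP on a 16-byte boundary
  "call _X86CompilationCallback2\n"
  "movl %ebp, %esp\n"               // Restore ESP from the frame pointer
  "popl %ebp\n"
  "ret\n");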