[llvm-commits] [llvm] r56925 - in /llvm/trunk/lib/Target/X86: X86FastISel.cpp X86Instr64bit.td X86InstrInfo.td X86RegisterInfo.cpp
Dan Gohman
gohman at apple.com
Wed Oct 1 11:28:07 PDT 2008
Author: djg
Date: Wed Oct 1 13:28:06 2008
New Revision: 56925
URL: http://llvm.org/viewvc/llvm-project?rev=56925&view=rev
Log:
Split x86's ADJCALLSTACK instructions into 32-bit and 64-bit forms.
This allows the 64-bit forms to use+def RSP instead of ESP. This
doesn't fix any real bugs today, but it is more precise and it
makes the debug dumps on x86-64 look more consistent.
Also, add some comments describing the CALL instructions' physreg
operand uses and defs.
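
For context on those comments: the Uses/Defs lists in the .td files surface as implicit physical-register operands on the instruction descriptors. Below is a minimal sketch, not part of this commit, of how that can be observed from C++; the accessor names (getImplicitUses returning a 0-terminated register array) are assumptions about this era of the TargetInstrDesc API.

// Minimal sketch, not part of this commit: the Uses/Defs lists in the .td
// files become implicit physreg operand lists on the instruction descriptor.
// Accessor names below are assumptions about this era of the API.
#include "llvm/Target/TargetInstrInfo.h"
#include "X86.h"   // symbolic X86 opcode/register names

using namespace llvm;

static bool callImplicitlyUses(const TargetInstrInfo &TII, unsigned Reg) {
  const TargetInstrDesc &TID = TII.get(X86::CALL64pcrel32);
  for (const unsigned *U = TID.getImplicitUses(); U && *U; ++U)
    if (*U == Reg)            // e.g. Reg == X86::RSP after this patch
      return true;
  return false;
}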
Modified:
llvm/trunk/lib/Target/X86/X86FastISel.cpp
llvm/trunk/lib/Target/X86/X86Instr64bit.td
llvm/trunk/lib/Target/X86/X86InstrInfo.td
llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp
Modified: llvm/trunk/lib/Target/X86/X86FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FastISel.cpp?rev=56925&r1=56924&r2=56925&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp Wed Oct 1 13:28:06 2008
@@ -960,7 +960,8 @@
unsigned NumBytes = CCInfo.getNextStackOffset();
// Issue CALLSEQ_START
- BuildMI(MBB, TII.get(X86::ADJCALLSTACKDOWN)).addImm(NumBytes);
+ unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
+ BuildMI(MBB, TII.get(AdjStackDown)).addImm(NumBytes);
// Process argument: walk the register/memloc assignments, inserting
// copies / loads.
@@ -1051,7 +1052,8 @@
}
// Issue CALLSEQ_END
- BuildMI(MBB, TII.get(X86::ADJCALLSTACKUP)).addImm(NumBytes).addImm(0);
+ unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
+ BuildMI(MBB, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
// Now handle call return value (if any).
if (RetVT.getSimpleVT() != MVT::isVoid) {
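
The FastISel hunk above is the consumer side of the split: since the pseudo opcode now depends on the subtarget, it asks the register info for the call-frame setup/destroy opcodes instead of naming X86::ADJCALLSTACKDOWN directly. A minimal sketch of the same accessor-based matching on the recognition side (illustrative only; the helper below is not from the tree):

// Sketch, not part of this commit: with the opcodes now split by mode,
// code that recognizes the call-frame markers should go through the
// TargetRegisterInfo accessors rather than hard-coding X86 opcodes.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

static void scanCallFramePseudos(MachineBasicBlock &MBB,
                                 const TargetRegisterInfo &TRI) {
  unsigned SetupOp   = TRI.getCallFrameSetupOpcode();
  unsigned DestroyOp = TRI.getCallFrameDestroyOpcode();

  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) {
    if (I->getOpcode() == SetupOp) {
      // ADJCALLSTACKDOWN{32,64}: operand 0 is the outgoing-argument byte count.
      int64_t Bytes = I->getOperand(0).getImm();
      (void)Bytes;
    } else if (I->getOpcode() == DestroyOp) {
      // ADJCALLSTACKUP{32,64}: operand 1 is the number of bytes the callee
      // itself pops on return (0 for the C calling convention).
      int64_t CalleePops = I->getOperand(1).getImm();
      (void)CalleePops;
    }
  }
}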
Modified: llvm/trunk/lib/Target/X86/X86Instr64bit.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Instr64bit.td?rev=56925&r1=56924&r2=56925&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86Instr64bit.td (original)
+++ llvm/trunk/lib/Target/X86/X86Instr64bit.td Wed Oct 1 13:28:06 2008
@@ -86,11 +86,30 @@
// Instruction list...
//
+// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
+// a stack adjustment and the codegen must know that they may modify the stack
+// pointer before prolog-epilog rewriting occurs.
+// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
+// sub / add which can clobber EFLAGS.
+let Defs = [RSP, EFLAGS], Uses = [RSP] in {
+def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+ "#ADJCALLSTACKDOWN",
+ [(X86callseq_start imm:$amt)]>,
+ Requires<[In64BitMode]>;
+def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "#ADJCALLSTACKUP",
+ [(X86callseq_end imm:$amt1, imm:$amt2)]>,
+ Requires<[In64BitMode]>;
+}
+
//===----------------------------------------------------------------------===//
// Call Instructions...
//
let isCall = 1 in
- // All calls clobber the non-callee saved registers...
+ // All calls clobber the non-callee saved registers. RSP is marked as
+ // a use to prevent stack-pointer assignments that appear immediately
+ // before calls from potentially appearing dead. Uses for argument
+ // registers are added manually.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
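
The Defs = [RSP, EFLAGS] / Uses = [RSP] on the new pseudos anticipate the expansion performed later in X86RegisterInfo::eliminateCallFramePseudoInstr (see the last hunk below). A sketch of what the 64-bit setup pseudo is pessimistically assumed to become, reusing that function's MF, MBB, I, TII, and Amount:

// Sketch: ADJCALLSTACKDOWN64 may be rewritten into real stack-pointer
// arithmetic once frame layout is known -- a SUB on RSP, which also
// clobbers EFLAGS.  That is why the pseudo lists both as Defs.
MachineInstr *New =
  BuildMI(MF, TII.get(X86::SUB64ri32), X86::RSP)
    .addReg(X86::RSP)
    .addImm(Amount);       // outgoing-argument size, rounded to StackAlign
MBB.insert(I, New);        // the pseudo marker itself is erased afterwards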
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=56925&r1=56924&r2=56925&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Wed Oct 1 13:28:06 2008
@@ -321,12 +321,14 @@
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
-def ADJCALLSTACKDOWN : I<0, Pseudo, (outs), (ins i32imm:$amt),
- "#ADJCALLSTACKDOWN",
- [(X86callseq_start imm:$amt)]>;
-def ADJCALLSTACKUP : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
- "#ADJCALLSTACKUP",
- [(X86callseq_end imm:$amt1, imm:$amt2)]>;
+def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs), (ins i32imm:$amt),
+ "#ADJCALLSTACKDOWN",
+ [(X86callseq_start imm:$amt)]>,
+ Requires<[In32BitMode]>;
+def ADJCALLSTACKUP32 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "#ADJCALLSTACKUP",
+ [(X86callseq_end imm:$amt1, imm:$amt2)]>,
+ Requires<[In32BitMode]>;
}
// Nop
@@ -411,7 +413,10 @@
// Call Instructions...
//
let isCall = 1 in
- // All calls clobber the non-callee saved registers...
+ // All calls clobber the non-callee saved registers. ESP is marked as
+ // a use to prevent stack-pointer assignments that appear immediately
+ // before calls from potentially appearing dead. Uses for argument
+ // registers are added manually.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, EFLAGS],
Modified: llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp?rev=56925&r1=56924&r2=56925&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86RegisterInfo.cpp Wed Oct 1 13:28:06 2008
@@ -42,7 +42,12 @@
X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
const TargetInstrInfo &tii)
- : X86GenRegisterInfo(X86::ADJCALLSTACKDOWN, X86::ADJCALLSTACKUP),
+ : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
+ X86::ADJCALLSTACKDOWN64 :
+ X86::ADJCALLSTACKDOWN32,
+ tm.getSubtarget<X86Subtarget>().is64Bit() ?
+ X86::ADJCALLSTACKUP64 :
+ X86::ADJCALLSTACKUP32),
TM(tm), TII(tii) {
// Cache some information.
const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
@@ -367,11 +372,11 @@
Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;
MachineInstr *New = 0;
- if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
+ if (Old->getOpcode() == getCallFrameSetupOpcode()) {
New = BuildMI(MF, TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
StackPtr).addReg(StackPtr).addImm(Amount);
} else {
- assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
+ assert(Old->getOpcode() == getCallFrameDestroyOpcode());
// factor out the amount the callee already popped.
uint64_t CalleeAmt = Old->getOperand(1).getImm();
Amount -= CalleeAmt;
@@ -387,7 +392,7 @@
// Replace the pseudo instruction with a new instruction...
if (New) MBB.insert(I, New);
}
- } else if (I->getOpcode() == X86::ADJCALLSTACKUP) {
+ } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
// If we are performing frame pointer elimination and if the callee pops
// something off the stack pointer, add it back. We do this until we have
// more advanced stack pointer tracking ability.
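
When the call frame is reserved (the else-if branch above), only the destroy pseudo needs work: if the callee pops its own arguments, that space is claimed back by subtracting the callee-popped amount from the stack pointer. A simplified sketch of that path, with Is64Bit, StackPtr, MF, MBB, I, and TII assumed from the surrounding function (the in-tree code also prefers shorter ri8 encodings for small amounts):

// Sketch of the reserved-call-frame path: "add it back" means re-reserving
// the space the callee popped, which is a subtract on the stack pointer.
if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
  unsigned Opc = Is64Bit ? X86::SUB64ri32 : X86::SUB32ri;
  MachineInstr *New =
    BuildMI(MF, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(CalleeAmt);
  MBB.insert(I, New);
}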