[llvm-commits] [llvm] r131469 - in /llvm/trunk/lib: CodeGen/CallingConvLower.cpp Target/X86/X86ISelLowering.cpp Target/X86/X86ISelLowering.h
Stuart Hastings
stuart at apple.com
Tue May 17 09:59:46 PDT 2011
Author: stuart
Date: Tue May 17 11:59:46 2011
New Revision: 131469
URL: http://llvm.org/viewvc/llvm-project?rev=131469&view=rev
Log:
Revert 131467 due to buildbot complaint.
Modified:
llvm/trunk/lib/CodeGen/CallingConvLower.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.h
Modified: llvm/trunk/lib/CodeGen/CallingConvLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/CallingConvLower.cpp?rev=131469&r1=131468&r2=131469&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/CallingConvLower.cpp (original)
+++ llvm/trunk/lib/CodeGen/CallingConvLower.cpp Tue May 17 11:59:46 2011
@@ -48,12 +48,8 @@
if (MinAlign > (int)Align)
Align = MinAlign;
TM.getTargetLowering()->HandleByVal(const_cast<CCState*>(this), Size);
- if (Size != 0) {
- unsigned Offset = AllocateStack(Size, Align);
- addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
- } else {
- addLoc(CCValAssign::getReg(ValNo, ValVT, getFirstByValReg(), LocVT, LocInfo));
- }
+ unsigned Offset = AllocateStack(Size, Align);
+ addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
/// MarkAllocated - Mark a register and all of its aliases as allocated.
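The restored path above reserves a properly aligned stack slot for every byval argument. As a minimal standalone sketch (not the actual CCState implementation, which also tracks register usage), an AllocateStack-style helper does align-then-bump arithmetic, assuming Align is a power of two:

    // Round the running offset up to Align, reserve Size bytes, and return
    // the offset at which the byval argument will live.
    static unsigned AllocateStackSketch(unsigned &StackOffset, unsigned Size,
                                        unsigned Align) {
      StackOffset = (StackOffset + Align - 1) & ~(Align - 1); // align up
      unsigned Offset = StackOffset;
      StackOffset += Size;                                    // bump past slot
      return Offset;
    }

For example, with the running offset at 4, Size 16, and Align 8, the slot lands at offset 8 and the running offset advances to 24.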
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=131469&r1=131468&r2=131469&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue May 17 11:59:46 2011
@@ -2091,36 +2091,21 @@
}
if (VA.isRegLoc()) {
- if (isByVal && (!IsSibcall && !isTailCall)) {
- // 64-bit only. x86_32 passes everything on the stack.
- assert(CCInfo.isFirstByValRegValid() && "isByVal, but no valid register assigned!");
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- unsigned int i, j;
- for (i = 0, j = CCInfo.getFirstByValReg(); j <= X86::R9; i++, j++) {
- SDValue Const = DAG.getConstant(8*i, MVT::i64);
- SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
- SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
- MachinePointerInfo(),
- false, false, 0);
- MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(j, Load));
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ if (isVarArg && IsWin64) {
+ // The Win64 ABI requires an argument XMM register to be copied to the
+ // corresponding shadow register if the callee is a varargs function.
+ unsigned ShadowReg = 0;
+ switch (VA.getLocReg()) {
+ case X86::XMM0: ShadowReg = X86::RCX; break;
+ case X86::XMM1: ShadowReg = X86::RDX; break;
+ case X86::XMM2: ShadowReg = X86::R8; break;
+ case X86::XMM3: ShadowReg = X86::R9; break;
}
- CCInfo.clearFirstByValReg();
- } else {
+ if (ShadowReg)
+ RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
+ } else { // Usual case: not byval.
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
- if (isVarArg && IsWin64) {
- // The Win64 ABI requires an argument XMM register to be copied to the
- // corresponding shadow register if the callee is a varargs function.
- unsigned ShadowReg = 0;
- switch (VA.getLocReg()) {
- case X86::XMM0: ShadowReg = X86::RCX; break;
- case X86::XMM1: ShadowReg = X86::RDX; break;
- case X86::XMM2: ShadowReg = X86::R8; break;
- case X86::XMM3: ShadowReg = X86::R9; break;
- }
- if (ShadowReg)
- RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
- }
}
} else if (!IsSibcall && (!isTailCall || isByVal)) {
assert(VA.isMemLoc());
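The shadow-register rule restored above can also be read as a table: the first four Win64 parameter slots are RCX/RDX/R8/R9 for integer arguments and XMM0-XMM3 for floating point, and a varargs callee reads the integer slots, so an FP argument must be mirrored into its integer twin. A sketch of the same mapping as a lookup (hypothetical helper, not LLVM code; the X86_* constants stand in for the LLVM register enums and are assumed consecutive for XMM0..XMM3, which the switch above deliberately avoids relying on):

    enum { X86_RCX = 1, X86_RDX, X86_R8, X86_R9,
           X86_XMM0, X86_XMM1, X86_XMM2, X86_XMM3 };

    static unsigned getWin64ShadowRegSketch(unsigned XMMReg) {
      static const unsigned Shadow[4] = { X86_RCX, X86_RDX, X86_R8, X86_R9 };
      unsigned Slot = XMMReg - X86_XMM0;  // parameter slot index, 0..3
      return Slot < 4 ? Shadow[Slot] : 0; // 0 means no shadow copy needed
    }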
@@ -2455,47 +2440,6 @@
return Offset;
}
-/// HandleByVal - Every parameter *after* a byval parameter is passed
-/// on the stack. Remember the next parameter register to allocate,
-/// and then confiscate the rest of the parameter registers to ensure
-/// this.
-void
-X86TargetLowering::HandleByVal(CCState *State, unsigned &size) const {
- // X86_32 passes all parameters on the stack, byval or whatever.
- // X86_64 does not split parameters between registers and memory; if
- // the parameter does not fit entirely inside the remaining
- // parameter registers, it goes on the stack.
- static const unsigned RegList2[] = {
- X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
- };
-
- if (!Subtarget->is64Bit())
- return;
-
- if (size > 16) // X86_64 aggregates > 16 bytes are passed in memory.
- return;
-
- unsigned reg = State->getFirstUnallocated(RegList2, 6);
-
- if (reg == 6) // Out of regs to allocate.
- return;
-
- // We expect the size to be 32 bits (4 bytes), or some non-zero multiple of 64 bits (8 bytes).
- unsigned nregs = size / 8;
- if (nregs == 0) nregs=1; // 32-bit case.
-
- unsigned regs_available = 6 - reg;
-
- if (nregs <= regs_available) {
- size = 0;
- State->setFirstByValReg(RegList2[reg]);
- while (nregs--) {
- State->AllocateReg(RegList2[reg]);
- reg++;
- }
- }
-}
-
/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
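To make the register accounting in the removed HandleByVal concrete, here is a standalone sketch (plain C++, no LLVM types; the register order and six-GPR limit mirror the function above) that replays the interesting case: a 16-byte byval aggregate arriving when RDI through RCX are already taken:

    #include <cstdio>

    static const char *GPRs[6] = { "RDI", "RSI", "RDX", "RCX", "R8", "R9" };

    // firstFree: index of the first unallocated GPR; size: byval size in
    // bytes. On success, size is zeroed to signal "passed in registers".
    static void handleByValSketch(unsigned &firstFree, unsigned &size) {
      if (size > 16) return;              // > 16 bytes: passed in memory
      unsigned nregs = size / 8;          // number of 8-byte chunks
      if (nregs == 0) nregs = 1;          // e.g. a 4-byte aggregate
      if (nregs > 6 - firstFree) return;  // does not fit: stays on the stack
      while (nregs--)
        printf("allocate %s\n", GPRs[firstFree++]);
      size = 0;
    }

    int main() {
      unsigned firstFree = 4, size = 16;  // RDI..RCX already allocated
      handleByValSketch(firstFree, size); // allocates R8 and R9; size -> 0
      return 0;
    }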
@@ -2660,7 +2604,7 @@
}
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
- if (ArgLocs.size()) {
+ if (CCInfo.getNextStackOffset()) {
MachineFunction &MF = DAG.getMachineFunction();
if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
return false;
@@ -2677,8 +2621,6 @@
ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (VA.getLocInfo() == CCValAssign::Indirect)
return false;
- if (Flags.isByVal())
- return false;
if (!VA.isRegLoc()) {
if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
MFI, MRI, TII))
@@ -11121,6 +11063,13 @@
return SDValue();
}
+/// PerformVectorZeroExtendCombine - Do target-specific dag combines on
+/// X86ISD::PMOVZXBW/PMOVZXWD/PMOVZXDQ nodes. Currently a placeholder that
+/// performs no combine.
+static SDValue PerformVectorZeroExtendCombine(SDNode *N, SelectionDAG &DAG) {
+ return SDValue();
+}
+
/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
@@ -12181,6 +12130,9 @@
case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG);
case X86ISD::SETCC: return PerformSETCCCombine(N, DAG);
+ case X86ISD::PMOVZXBW:
+ case X86ISD::PMOVZXWD:
+ case X86ISD::PMOVZXDQ: return PerformVectorZeroExtendCombine(N, DAG);
case X86ISD::SHUFPS: // Handle all target specific shuffles
case X86ISD::SHUFPD:
case X86ISD::PALIGN:
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.h?rev=131469&r1=131468&r2=131469&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.h (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.h Tue May 17 11:59:46 2011
@@ -263,6 +263,10 @@
PUNPCKHDQ,
PUNPCKHQDQ,
+ PMOVZXBW,
+ PMOVZXWD,
+ PMOVZXDQ,
+
// VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
// according to %al. An operator is needed so that this can be expanded
// with control flow.
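For context on the comment above: the SysV x86-64 varargs convention passes the number of vector registers the caller actually used in %al, so the callee's saves of XMM0-XMM7 into the register save area must be guarded by a branch, and that branch is the control flow this pseudo-op expands into. A rough assembly illustration (not actual compiler output; the offsets assume the canonical save-area layout with the six GPRs occupying the first 48 bytes, and real code indexes off a frame register):

    testb  %al, %al          # %al = number of XMM argument registers used
    je     .Lskip            # none used: skip the vector stores
    movaps %xmm0,  48(%rsp)
    movaps %xmm1,  64(%rsp)
    movaps %xmm2,  80(%rsp)
    movaps %xmm3,  96(%rsp)
    movaps %xmm4, 112(%rsp)
    movaps %xmm5, 128(%rsp)
    movaps %xmm6, 144(%rsp)
    movaps %xmm7, 160(%rsp)
  .Lskip: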
@@ -728,8 +732,6 @@
// Call lowering helpers.
- void HandleByVal(CCState *, unsigned &) const;
-
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.