[llvm-branch-commits] [llvm] bfa6ca0 - [PowerPC] Delete remnant Darwin ISelLowering code
Fangrui Song via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Wed Jan 6 21:44:46 PST 2021
Author: Fangrui Song
Date: 2021-01-06T21:40:40-08:00
New Revision: bfa6ca07a8cda0ab889b7fee0b914907ce594e11
URL: https://github.com/llvm/llvm-project/commit/bfa6ca07a8cda0ab889b7fee0b914907ce594e11
DIFF: https://github.com/llvm/llvm-project/commit/bfa6ca07a8cda0ab889b7fee0b914907ce594e11.diff
LOG: [PowerPC] Delete remnant Darwin ISelLowering code
Added:
Modified:
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.h
llvm/lib/Target/PowerPC/PPCSubtarget.h
Removed:
################################################################################
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 1b1e9e019476..b92f4a15a49e 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3741,11 +3741,8 @@ SDValue PPCTargetLowering::LowerFormalArguments(
if (Subtarget.is64BitELFABI())
return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
InVals);
- if (Subtarget.is32BitELFABI())
- return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
- InVals);
-
- return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
+ assert(Subtarget.is32BitELFABI());
+ return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
InVals);
}
@@ -4399,366 +4396,6 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
return Chain;
}
-SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
- // TODO: add description of PPC stack frame format, or at least some docs.
- //
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo &MFI = MF.getFrameInfo();
- PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
-
- EVT PtrVT = getPointerTy(MF.getDataLayout());
- bool isPPC64 = PtrVT == MVT::i64;
- // Potential tail calls could cause overwriting of argument stack slots.
- bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
- (CallConv == CallingConv::Fast));
- unsigned PtrByteSize = isPPC64 ? 8 : 4;
- unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
- unsigned ArgOffset = LinkageSize;
- // Area that is at least reserved in caller of this function.
- unsigned MinReservedArea = ArgOffset;
-
- static const MCPhysReg GPR_32[] = { // 32-bit registers.
- PPC::R3, PPC::R4, PPC::R5, PPC::R6,
- PPC::R7, PPC::R8, PPC::R9, PPC::R10,
- };
- static const MCPhysReg GPR_64[] = { // 64-bit registers.
- PPC::X3, PPC::X4, PPC::X5, PPC::X6,
- PPC::X7, PPC::X8, PPC::X9, PPC::X10,
- };
- static const MCPhysReg VR[] = {
- PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
- PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
- };
-
- const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
- const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
- const unsigned Num_VR_Regs = array_lengthof( VR);
-
- unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
-
- const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
-
- // In 32-bit non-varargs functions, the stack space for vectors is after the
- // stack space for non-vectors. We do not use this space unless we have
- // too many vectors to fit in registers, something that only occurs in
- // constructed examples:), but we have to walk the arglist to figure
- // that out...for the pathological case, compute VecArgOffset as the
- // start of the vector parameter area. Computing VecArgOffset is the
- // entire point of the following loop.
- unsigned VecArgOffset = ArgOffset;
- if (!isVarArg && !isPPC64) {
- for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
- ++ArgNo) {
- EVT ObjectVT = Ins[ArgNo].VT;
- ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
-
- if (Flags.isByVal()) {
- // ObjSize is the true size, ArgSize rounded up to multiple of regs.
- unsigned ObjSize = Flags.getByValSize();
- unsigned ArgSize =
- ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
- VecArgOffset += ArgSize;
- continue;
- }
-
- switch(ObjectVT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unhandled argument type!");
- case MVT::i1:
- case MVT::i32:
- case MVT::f32:
- VecArgOffset += 4;
- break;
- case MVT::i64: // PPC64
- case MVT::f64:
- // FIXME: We are guaranteed to be !isPPC64 at this point.
- // Does MVT::i64 apply?
- VecArgOffset += 8;
- break;
- case MVT::v4f32:
- case MVT::v4i32:
- case MVT::v8i16:
- case MVT::v16i8:
- // Nothing to do, we're only looking at Nonvector args here.
- break;
- }
- }
- }
- // We've found where the vector parameter area in memory is. Skip the
- // first 12 parameters; these don't use that memory.
- VecArgOffset = ((VecArgOffset+15)/16)*16;
- VecArgOffset += 12*16;
-
- // Add DAG nodes to load the arguments or copy them out of registers. On
- // entry to a function on PPC, the arguments start after the linkage area,
- // although the first ones are often in registers.
-
- SmallVector<SDValue, 8> MemOps;
- unsigned nAltivecParamsAtEnd = 0;
- Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
- unsigned CurArgIdx = 0;
- for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
- SDValue ArgVal;
- bool needsLoad = false;
- EVT ObjectVT = Ins[ArgNo].VT;
- unsigned ObjSize = ObjectVT.getSizeInBits()/8;
- unsigned ArgSize = ObjSize;
- ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
- if (Ins[ArgNo].isOrigArg()) {
- std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
- CurArgIdx = Ins[ArgNo].getOrigArgIndex();
- }
- unsigned CurArgOffset = ArgOffset;
-
- // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
- if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
- ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
- if (isVarArg || isPPC64) {
- MinReservedArea = ((MinReservedArea+15)/16)*16;
- MinReservedArea += CalculateStackSlotSize(ObjectVT,
- Flags,
- PtrByteSize);
- } else nAltivecParamsAtEnd++;
- } else
- // Calculate min reserved area.
- MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
- Flags,
- PtrByteSize);
-
- // FIXME the codegen can be much improved in some cases.
- // We do not have to keep everything in memory.
- if (Flags.isByVal()) {
- assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
-
- // ObjSize is the true size, ArgSize rounded up to multiple of registers.
- ObjSize = Flags.getByValSize();
- ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
- // Objects of size 1 and 2 are right justified, everything else is
- // left justified. This means the memory address is adjusted forwards.
- if (ObjSize==1 || ObjSize==2) {
- CurArgOffset = CurArgOffset + (4 - ObjSize);
- }
- // The value of the object is its address.
- int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
- SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- InVals.push_back(FIN);
- if (ObjSize==1 || ObjSize==2) {
- if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg;
- if (isPPC64)
- VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
- else
- VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
- SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
- SDValue Store =
- DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(&*FuncArg), ObjType);
- MemOps.push_back(Store);
- ++GPR_idx;
- }
-
- ArgOffset += PtrByteSize;
-
- continue;
- }
- for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
- // Store whatever pieces of the object are in registers
- // to memory. ArgOffset will be the address of the beginning
- // of the object.
- if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg;
- if (isPPC64)
- VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
- else
- VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
- int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
- SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
- MachinePointerInfo(&*FuncArg, j));
- MemOps.push_back(Store);
- ++GPR_idx;
- ArgOffset += PtrByteSize;
- } else {
- ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
- break;
- }
- }
- continue;
- }
-
- switch (ObjectVT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unhandled argument type!");
- case MVT::i1:
- case MVT::i32:
- if (!isPPC64) {
- if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
- ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
-
- if (ObjectVT == MVT::i1)
- ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);
-
- ++GPR_idx;
- } else {
- needsLoad = true;
- ArgSize = PtrByteSize;
- }
- // All int arguments reserve stack space in the Darwin ABI.
- ArgOffset += PtrByteSize;
- break;
- }
- LLVM_FALLTHROUGH;
- case MVT::i64: // PPC64
- if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
- ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
-
- if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
- // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
- // value to MVT::i64 and then truncate to the correct register size.
- ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
-
- ++GPR_idx;
- } else {
- needsLoad = true;
- ArgSize = PtrByteSize;
- }
- // All int arguments reserve stack space in the Darwin ABI.
- ArgOffset += 8;
- break;
-
- case MVT::f32:
- case MVT::f64:
- // Every 4 bytes of argument space consumes one of the GPRs available for
- // argument passing.
- if (GPR_idx != Num_GPR_Regs) {
- ++GPR_idx;
- if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
- ++GPR_idx;
- }
- if (FPR_idx != Num_FPR_Regs) {
- unsigned VReg;
-
- if (ObjectVT == MVT::f32)
- VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
- else
- VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
-
- ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
- ++FPR_idx;
- } else {
- needsLoad = true;
- }
-
- // All FP arguments reserve stack space in the Darwin ABI.
- ArgOffset += isPPC64 ? 8 : ObjSize;
- break;
- case MVT::v4f32:
- case MVT::v4i32:
- case MVT::v8i16:
- case MVT::v16i8:
- // Note that vector arguments in registers don't reserve stack space,
- // except in varargs functions.
- if (VR_idx != Num_VR_Regs) {
- unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
- ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
- if (isVarArg) {
- while ((ArgOffset % 16) != 0) {
- ArgOffset += PtrByteSize;
- if (GPR_idx != Num_GPR_Regs)
- GPR_idx++;
- }
- ArgOffset += 16;
- GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
- }
- ++VR_idx;
- } else {
- if (!isVarArg && !isPPC64) {
- // Vectors go after all the nonvectors.
- CurArgOffset = VecArgOffset;
- VecArgOffset += 16;
- } else {
- // Vectors are aligned.
- ArgOffset = ((ArgOffset+15)/16)*16;
- CurArgOffset = ArgOffset;
- ArgOffset += 16;
- }
- needsLoad = true;
- }
- break;
- }
-
- // We need to load the argument to a virtual register if we determined above
- // that we ran out of physical registers of the appropriate type.
- if (needsLoad) {
- int FI = MFI.CreateFixedObject(ObjSize,
- CurArgOffset + (ArgSize - ObjSize),
- isImmutable);
- SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
- ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
- }
-
- InVals.push_back(ArgVal);
- }
-
- // Allow for Altivec parameters at the end, if needed.
- if (nAltivecParamsAtEnd) {
- MinReservedArea = ((MinReservedArea+15)/16)*16;
- MinReservedArea += 16*nAltivecParamsAtEnd;
- }
-
- // Area that is at least reserved in the caller of this function.
- MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
-
- // Set the size that is at least reserved in caller of this function. Tail
- // call optimized functions' reserved stack space needs to be aligned so that
- // taking the difference between two stack areas will result in an aligned
- // stack.
- MinReservedArea =
- EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
- FuncInfo->setMinReservedArea(MinReservedArea);
-
- // If the function takes variable number of arguments, make a frame index for
- // the start of the first vararg value... for expansion of llvm.va_start.
- if (isVarArg) {
- int Depth = ArgOffset;
-
- FuncInfo->setVarArgsFrameIndex(
- MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
- Depth, true));
- SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
-
- // If this function is vararg, store any remaining integer argument regs
- // to their spots on the stack so that they may be loaded by dereferencing
- // the result of va_next.
- for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
- unsigned VReg;
-
- if (isPPC64)
- VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
- else
- VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
-
- SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
- SDValue Store =
- DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
- MemOps.push_back(Store);
- // Increment the address by four for the next argument to store
- SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
- FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
- }
- }
-
- if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
-
- return Chain;
-}
-
/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tailcall.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
@@ -5793,19 +5430,15 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
CLI.NoMerge);
- if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
- return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
- InVals, CB);
-
- if (Subtarget.isSVR4ABI())
- return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
- InVals, CB);
-
if (Subtarget.isAIXABI())
return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
InVals, CB);
- return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
+ assert(Subtarget.isSVR4ABI());
+ if (Subtarget.isPPC64())
+ return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
+ InVals, CB);
+ return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
InVals, CB);
}
@@ -6652,384 +6285,6 @@ SDValue PPCTargetLowering::LowerCall_64SVR4(
Callee, SPDiff, NumBytes, Ins, InVals, CB);
}
-SDValue PPCTargetLowering::LowerCall_Darwin(
- SDValue Chain, SDValue Callee, CallFlags CFlags,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
- const CallBase *CB) const {
- unsigned NumOps = Outs.size();
-
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- bool isPPC64 = PtrVT == MVT::i64;
- unsigned PtrByteSize = isPPC64 ? 8 : 4;
-
- MachineFunction &MF = DAG.getMachineFunction();
-
- // Mark this function as potentially containing a function that contains a
- // tail call. As a consequence the frame pointer will be used for dynamicalloc
- // and restoring the callers stack pointer in this functions epilog. This is
- // done because by tail calling the called function might overwrite the value
- // in this function's (MF) stack pointer stack slot 0(SP).
- if (getTargetMachine().Options.GuaranteedTailCallOpt &&
- CFlags.CallConv == CallingConv::Fast)
- MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
-
- // Count how many bytes are to be pushed on the stack, including the linkage
- // area, and parameter passing area. We start with 24/48 bytes, which is
- // prereserved space for [SP][CR][LR][3 x unused].
- unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
- unsigned NumBytes = LinkageSize;
-
- // Add up all the space actually used.
- // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
- // they all go in registers, but we must reserve stack space for them for
- // possible use by the caller. In varargs or 64-bit calls, parameters are
- // assigned stack space in order, with padding so Altivec parameters are
- // 16-byte aligned.
- unsigned nAltivecParamsAtEnd = 0;
- for (unsigned i = 0; i != NumOps; ++i) {
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
- EVT ArgVT = Outs[i].VT;
- // Varargs Altivec parameters are padded to a 16 byte boundary.
- if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
- ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
- ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
- if (!CFlags.IsVarArg && !isPPC64) {
- // Non-varargs Altivec parameters go after all the non-Altivec
- // parameters; handle those later so we know how much padding we need.
- nAltivecParamsAtEnd++;
- continue;
- }
- // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
- NumBytes = ((NumBytes+15)/16)*16;
- }
- NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
- }
-
- // Allow for Altivec parameters at the end, if needed.
- if (nAltivecParamsAtEnd) {
- NumBytes = ((NumBytes+15)/16)*16;
- NumBytes += 16*nAltivecParamsAtEnd;
- }
-
- // The prolog code of the callee may store up to 8 GPR argument registers to
- // the stack, allowing va_start to index over them in memory if its varargs.
- // Because we cannot tell if this is needed on the caller side, we have to
- // conservatively assume that it is needed. As such, make sure we have at
- // least enough stack space for the caller to store the 8 GPRs.
- NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
-
- // Tail call needs the stack to be aligned.
- if (getTargetMachine().Options.GuaranteedTailCallOpt &&
- CFlags.CallConv == CallingConv::Fast)
- NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
-
- // Calculate by how many bytes the stack has to be adjusted in case of tail
- // call optimization.
- int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
-
- // To protect arguments on the stack from being clobbered in a tail call,
- // force all the loads to happen before doing any other lowering.
- if (CFlags.IsTailCall)
- Chain = DAG.getStackArgumentTokenFactor(Chain);
-
- // Adjust the stack pointer for the new arguments...
- // These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
- SDValue CallSeqStart = Chain;
-
- // Load the return address and frame pointer so it can be move somewhere else
- // later.
- SDValue LROp, FPOp;
- Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
-
- // Set up a copy of the stack pointer for use loading and storing any
- // arguments that may not fit in the registers available for argument
- // passing.
- SDValue StackPtr;
- if (isPPC64)
- StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
- else
- StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
-
- // Figure out which arguments are going to go in registers, and which in
- // memory. Also, if this is a vararg function, floating point operations
- // must be stored to our stack, and loaded into integer regs as well, if
- // any integer regs are available for argument passing.
- unsigned ArgOffset = LinkageSize;
- unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
-
- static const MCPhysReg GPR_32[] = { // 32-bit registers.
- PPC::R3, PPC::R4, PPC::R5, PPC::R6,
- PPC::R7, PPC::R8, PPC::R9, PPC::R10,
- };
- static const MCPhysReg GPR_64[] = { // 64-bit registers.
- PPC::X3, PPC::X4, PPC::X5, PPC::X6,
- PPC::X7, PPC::X8, PPC::X9, PPC::X10,
- };
- static const MCPhysReg VR[] = {
- PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
- PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
- };
- const unsigned NumGPRs = array_lengthof(GPR_32);
- const unsigned NumFPRs = 13;
- const unsigned NumVRs = array_lengthof(VR);
-
- const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
-
- SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
- SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
-
- SmallVector<SDValue, 8> MemOpChains;
- for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = OutVals[i];
- ISD::ArgFlagsTy Flags = Outs[i].Flags;
-
- // PtrOff will be used to store the current argument to the stack if a
- // register cannot be found for it.
- SDValue PtrOff;
-
- PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
-
- PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
-
- // On PPC64, promote integers to 64-bit values.
- if (isPPC64 && Arg.getValueType() == MVT::i32) {
- // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
- unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
- }
-
- // FIXME memcpy is used way more than necessary. Correctness first.
- // Note: "by value" is code for passing a structure by value, not
- // basic types.
- if (Flags.isByVal()) {
- unsigned Size = Flags.getByValSize();
- // Very small objects are passed right-justified. Everything else is
- // passed left-justified.
- if (Size==1 || Size==2) {
- EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
- if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
- MachinePointerInfo(), VT);
- MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
-
- ArgOffset += PtrByteSize;
- } else {
- SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
- PtrOff.getValueType());
- SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
- Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
- CallSeqStart,
- Flags, DAG, dl);
- ArgOffset += PtrByteSize;
- }
- continue;
- }
- // Copy entire object into memory. There are cases where gcc-generated
- // code assumes it is there, even if it could be put entirely into
- // registers. (This is not what the doc says.)
- Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
- CallSeqStart,
- Flags, DAG, dl);
-
- // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
- // copy the pieces of the object that fit into registers from the
- // parameter save area.
- for (unsigned j=0; j<Size; j+=PtrByteSize) {
- SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
- SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
- if (GPR_idx != NumGPRs) {
- SDValue Load =
- DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
- MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
- ArgOffset += PtrByteSize;
- } else {
- ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
- break;
- }
- }
- continue;
- }
-
- switch (Arg.getSimpleValueType().SimpleTy) {
- default: llvm_unreachable("Unexpected ValueType for argument!");
- case MVT::i1:
- case MVT::i32:
- case MVT::i64:
- if (GPR_idx != NumGPRs) {
- if (Arg.getValueType() == MVT::i1)
- Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
-
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
- } else {
- LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
- isPPC64, CFlags.IsTailCall, false, MemOpChains,
- TailCallArguments, dl);
- }
- ArgOffset += PtrByteSize;
- break;
- case MVT::f32:
- case MVT::f64:
- if (FPR_idx != NumFPRs) {
- RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
-
- if (CFlags.IsVarArg) {
- SDValue Store =
- DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
- MemOpChains.push_back(Store);
-
- // Float varargs are always shadowed in available integer registers
- if (GPR_idx != NumGPRs) {
- SDValue Load =
- DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
- MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
- }
- if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
- SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
- PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
- SDValue Load =
- DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
- MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
- }
- } else {
- // If we have any FPRs remaining, we may also have GPRs remaining.
- // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
- // GPRs.
- if (GPR_idx != NumGPRs)
- ++GPR_idx;
- if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
- !isPPC64) // PPC64 has 64-bit GPR's obviously :)
- ++GPR_idx;
- }
- } else
- LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
- isPPC64, CFlags.IsTailCall, false, MemOpChains,
- TailCallArguments, dl);
- if (isPPC64)
- ArgOffset += 8;
- else
- ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
- break;
- case MVT::v4f32:
- case MVT::v4i32:
- case MVT::v8i16:
- case MVT::v16i8:
- if (CFlags.IsVarArg) {
- // These go aligned on the stack, or in the corresponding R registers
- // when within range. The Darwin PPC ABI doc claims they also go in
- // V registers; in fact gcc does this only for arguments that are
- // prototyped, not for those that match the ... We do it for all
- // arguments, seems to work.
- while (ArgOffset % 16 !=0) {
- ArgOffset += PtrByteSize;
- if (GPR_idx != NumGPRs)
- GPR_idx++;
- }
- // We could elide this store in the case where the object fits
- // entirely in R registers. Maybe later.
- PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
- DAG.getConstant(ArgOffset, dl, PtrVT));
- SDValue Store =
- DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
- MemOpChains.push_back(Store);
- if (VR_idx != NumVRs) {
- SDValue Load =
- DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
- MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
- }
- ArgOffset += 16;
- for (unsigned i=0; i<16; i+=PtrByteSize) {
- if (GPR_idx == NumGPRs)
- break;
- SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
- DAG.getConstant(i, dl, PtrVT));
- SDValue Load =
- DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
- MemOpChains.push_back(Load.getValue(1));
- RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
- }
- break;
- }
-
- // Non-varargs Altivec params generally go in registers, but have
- // stack space allocated at the end.
- if (VR_idx != NumVRs) {
- // Doesn't have GPR space allocated.
- RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
- } else if (nAltivecParamsAtEnd==0) {
- // We are emitting Altivec params in order.
- LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
- isPPC64, CFlags.IsTailCall, true, MemOpChains,
- TailCallArguments, dl);
- ArgOffset += 16;
- }
- break;
- }
- }
- // If all Altivec parameters fit in registers, as they usually do,
- // they get stack space following the non-Altivec parameters. We
- // don't track this here because nobody below needs it.
- // If there are more Altivec parameters than fit in registers emit
- // the stores here.
- if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
- unsigned j = 0;
- // Offset is aligned; skip 1st 12 params which go in V registers.
- ArgOffset = ((ArgOffset+15)/16)*16;
- ArgOffset += 12*16;
- for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = OutVals[i];
- EVT ArgType = Outs[i].VT;
- if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
- ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
- if (++j > NumVRs) {
- SDValue PtrOff;
- // We are emitting Altivec params in order.
- LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
- isPPC64, CFlags.IsTailCall, true, MemOpChains,
- TailCallArguments, dl);
- ArgOffset += 16;
- }
- }
- }
- }
-
- if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
-
- // On Darwin, R12 must contain the address of an indirect callee. This does
- // not mean the MTCTR instruction must use R12; it's easier to model this as
- // an extra parameter, so do that.
- if (CFlags.IsIndirect) {
- assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
- RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
- PPC::R12), Callee));
- }
-
- // Build a sequence of copy-to-reg nodes chained together with token chain
- // and flag operands which copy the outgoing args into the appropriate regs.
- SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
-
- if (CFlags.IsTailCall)
- PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
- TailCallArguments);
-
- return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
- Callee, SPDiff, NumBytes, Ins, InVals, CB);
-}
-
static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
CCState &State) {
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 40c1a5f18cf5..10df7c2feddf 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -152,8 +152,7 @@ namespace llvm {
/// probed.
PROBED_ALLOCA,
- /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
- /// at function entry, used for PIC code.
+ /// The result of the mflr at function entry, used for PIC code.
GlobalBaseReg,
/// These nodes represent PPC shifts.
@@ -1208,10 +1207,6 @@ namespace llvm {
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
- SDValue LowerFormalArguments_Darwin(
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerFormalArguments_64SVR4(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
@@ -1226,13 +1221,6 @@ namespace llvm {
ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
const SDLoc &dl) const;
- SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee, CallFlags CFlags,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- const SDLoc &dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals,
- const CallBase *CB) const;
SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee, CallFlags CFlags,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
diff --git a/llvm/lib/Target/PowerPC/PPCSubtarget.h b/llvm/lib/Target/PowerPC/PPCSubtarget.h
index 4552defd657e..8f0034131bc5 100644
--- a/llvm/lib/Target/PowerPC/PPCSubtarget.h
+++ b/llvm/lib/Target/PowerPC/PPCSubtarget.h
@@ -183,9 +183,6 @@ class PPCSubtarget : public PPCGenSubtargetInfo {
/// function for this subtarget.
Align getStackAlignment() const { return StackAlignment; }
- /// getDarwinDirective - Returns the -m directive specified for the cpu.
- unsigned getDarwinDirective() const { return CPUDirective; }
-
/// getCPUDirective - Returns the -m directive specified for the cpu.
///
unsigned getCPUDirective() const { return CPUDirective; }