[llvm] r372164 - [AArch64][GlobalISel][NFC] Refactor tail call lowering code
Jessica Paquette via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 17 12:08:44 PDT 2019
Author: paquette
Date: Tue Sep 17 12:08:44 2019
New Revision: 372164
URL: http://llvm.org/viewvc/llvm-project?rev=372164&view=rev
Log:
[AArch64][GlobalISel][NFC] Refactor tail call lowering code
When you begin implementing -tailcallopt, the call lowering code gets somewhat
hairy. Refactor it so that tail call lowering gets its own function.
Differential Revision: https://reviews.llvm.org/D67577
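In rough terms, the change replaces the IsSibCall flag that was threaded through
lowerCall with an early dispatch to a dedicated lowerTailCall helper. The
self-contained C++ sketch below illustrates only that control-flow shape; the
types and helper names are stand-ins, not the real GlobalISel /
AArch64CallLowering API:

#include <iostream>
#include <vector>

// Stand-in types; the real code works with MachineIRBuilder,
// CallLoweringInfo, and ArgInfo from GlobalISel.
struct ArgInfo { int Reg = 0; };
struct CallLoweringInfo {
  bool IsTailCall = false;
  bool LoweredTailCall = false;
};

struct CallLowering {
  // Hypothetical eligibility check standing in for
  // isEligibleForTailCallOptimization().
  bool isEligibleForTailCall(const CallLoweringInfo &) const { return true; }

  // Dedicated tail-call path: in the real patch this builds the
  // TCRETURN-style pseudo and marshals arguments with no stack adjustment.
  bool lowerTailCall(CallLoweringInfo &Info,
                     std::vector<ArgInfo> &OutArgs) const {
    (void)OutArgs;
    Info.LoweredTailCall = true;
    return true;
  }

  // lowerCall now dispatches early instead of carrying an IsSibCall flag
  // through every subsequent step (stack adjustment, opcode choice, etc.).
  bool lowerCall(CallLoweringInfo &Info, std::vector<ArgInfo> &OutArgs) const {
    if (Info.IsTailCall && isEligibleForTailCall(Info))
      return lowerTailCall(Info, OutArgs);
    // ... ADJCALLSTACKDOWN, BL/BLR, argument marshalling, result copies ...
    return true;
  }
};

int main() {
  CallLowering CL;
  CallLoweringInfo Info;
  Info.IsTailCall = true;
  std::vector<ArgInfo> Args;
  std::cout << "lowered: " << CL.lowerCall(Info, Args)
            << " tail: " << Info.LoweredTailCall << "\n";
  return 0;
}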
Modified:
llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp
llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h
Modified: llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp?rev=372164&r1=372163&r2=372164&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp Tue Sep 17 12:08:44 2019
@@ -687,6 +687,70 @@ static unsigned getCallOpcode(const Func
return AArch64::TCRETURNri;
}
+bool AArch64CallLowering::lowerTailCall(
+ MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
+ SmallVectorImpl<ArgInfo> &OutArgs) const {
+ MachineFunction &MF = MIRBuilder.getMF();
+ const Function &F = MF.getFunction();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
+
+ // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
+ // register class. Until we can do that, we should fall back here.
+ if (F.hasFnAttribute("branch-target-enforcement")) {
+ LLVM_DEBUG(
+ dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
+ return false;
+ }
+
+ // Find out which ABI gets to decide where things go.
+ CCAssignFn *AssignFnFixed =
+ TLI.CCAssignFnForCall(Info.CallConv, /*IsVarArg=*/false);
+ CCAssignFn *AssignFnVarArg =
+ TLI.CCAssignFnForCall(Info.CallConv, /*IsVarArg=*/true);
+
+ unsigned Opc = getCallOpcode(F, Info.Callee.isReg(), true);
+ auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
+ MIB.add(Info.Callee);
+
+ // Add the byte offset for the tail call. We only have sibling calls, so this
+ // is always 0.
+ // TODO: Handle tail calls where we will have a different value here.
+ MIB.addImm(0);
+
+ // Tell the call which registers are clobbered.
+ auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(MF, F.getCallingConv());
+ if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
+ TRI->UpdateCustomCallPreservedMask(MF, &Mask);
+ MIB.addRegMask(Mask);
+
+ if (TRI->isAnyArgRegReserved(MF))
+ TRI->emitReservedArgRegCallError(MF);
+
+ // Do the actual argument marshalling.
+ SmallVector<unsigned, 8> PhysRegs;
+ OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
+ AssignFnVarArg, true);
+ if (!handleAssignments(MIRBuilder, OutArgs, Handler))
+ return false;
+
+ // Now we can add the actual call instruction to the correct basic block.
+ MIRBuilder.insertInstr(MIB);
+
+ // If Callee is a reg, since it is used by a target specific instruction,
+ // it must have a register class matching the constraint of that instruction.
+ if (Info.Callee.isReg())
+ MIB->getOperand(0).setReg(constrainOperandRegClass(
+ MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
+ *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
+ 0));
+
+ MF.getFrameInfo().setHasTailCall();
+ Info.LoweredTailCall = true;
+ return true;
+}
+
bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
CallLoweringInfo &Info) const {
MachineFunction &MF = MIRBuilder.getMF();
@@ -719,10 +783,10 @@ bool AArch64CallLowering::lowerCall(Mach
if (!Info.OrigRet.Ty->isVoidTy())
splitToValueTypes(Info.OrigRet, InArgs, DL, MRI, F.getCallingConv());
- bool IsSibCall = Info.IsTailCall && isEligibleForTailCallOptimization(
- MIRBuilder, Info, InArgs, OutArgs);
- if (IsSibCall)
- MF.getFrameInfo().setHasTailCall();
+ // If we can lower as a tail call, do that instead.
+ if (Info.IsTailCall &&
+ isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs))
+ return lowerTailCall(MIRBuilder, Info, OutArgs);
// Find out which ABI gets to decide where things go.
CCAssignFn *AssignFnFixed =
@@ -730,33 +794,16 @@ bool AArch64CallLowering::lowerCall(Mach
CCAssignFn *AssignFnVarArg =
TLI.CCAssignFnForCall(Info.CallConv, /*IsVarArg=*/true);
- // If we have a sibling call, then we don't have to adjust the stack.
- // Otherwise, we need to adjust it.
MachineInstrBuilder CallSeqStart;
- if (!IsSibCall)
- CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
+ CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
// Create a temporarily-floating call instruction so we can add the implicit
// uses of arg registers.
- unsigned Opc = getCallOpcode(F, Info.Callee.isReg(), IsSibCall);
-
- // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
- // register class. Until we can do that, we should fall back here.
- if (Opc == AArch64::TCRETURNriBTI) {
- LLVM_DEBUG(
- dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
- return false;
- }
+ unsigned Opc = getCallOpcode(F, Info.Callee.isReg(), false);
auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
MIB.add(Info.Callee);
- // Add the byte offset for the tail call. We only have sibling calls, so this
- // is always 0.
- // TODO: Handle tail calls where we will have a different value here.
- if (IsSibCall)
- MIB.addImm(0);
-
// Tell the call which registers are clobbered.
auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(MF, F.getCallingConv());
@@ -770,7 +817,7 @@ bool AArch64CallLowering::lowerCall(Mach
// Do the actual argument marshalling.
SmallVector<unsigned, 8> PhysRegs;
OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
- AssignFnVarArg, IsSibCall);
+ AssignFnVarArg, false);
if (!handleAssignments(MIRBuilder, OutArgs, Handler))
return false;
@@ -786,13 +833,6 @@ bool AArch64CallLowering::lowerCall(Mach
*MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
0));
- // If we're tail calling, then we're the return from the block. So, we don't
- // want to copy anything.
- if (IsSibCall) {
- Info.LoweredTailCall = true;
- return true;
- }
-
// Finally we can copy the returned value back into its virtual-register. In
// symmetry with the arguments, the physical register must be an
// implicit-define of the call instruction.
Modified: llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h?rev=372164&r1=372163&r2=372164&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h Tue Sep 17 12:08:44 2019
@@ -64,6 +64,9 @@ private:
const DataLayout &DL, MachineRegisterInfo &MRI,
CallingConv::ID CallConv) const;
+ bool lowerTailCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
+ SmallVectorImpl<ArgInfo> &OutArgs) const;
+
bool
doCallerAndCalleePassArgsTheSameWay(CallLoweringInfo &Info,
MachineFunction &MF,