[llvm] r364513 - [AArch64 GlobalISel] Cleanup CallLowering. NFCI
Diana Picus via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 27 02:24:30 PDT 2019
Author: rovka
Date: Thu Jun 27 02:24:30 2019
New Revision: 364513
URL: http://llvm.org/viewvc/llvm-project?rev=364513&view=rev
Log:
[AArch64 GlobalISel] Cleanup CallLowering. NFCI
Now that lowerCall and lowerFormalArguments have been refactored, we can
simplify splitToValueTypes.
Differential Revision: https://reviews.llvm.org/D63552
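For reference, here is a sketch of the simplified splitToValueTypes, assembled from the '+' and context lines of the first hunk below. The SplitVTs setup and the early-return path above the shown hunk are unchanged by this patch and are elided, so treat this as a reading aid rather than a verbatim copy of the committed file:

  void AArch64CallLowering::splitToValueTypes(
      const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
      const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv) const {
    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
    LLVMContext &Ctx = OrigArg.Ty->getContext();

    // ... code that computes SplitVTs from OrigArg.Ty and returns early when
    // no splitting is needed is not touched by this patch and is elided ...

    // Create one ArgInfo for each virtual register in the original ArgInfo.
    assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        OrigArg.Ty, CallConv, false);
    for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
      Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
      SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags,
                             OrigArg.IsFixed);
      if (NeedsRegBlock)
        SplitArgs.back().Flags.setInConsecutiveRegs();
    }

    SplitArgs.back().Flags.setInConsecutiveRegsLast();
  }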
Modified:
llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp
llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h
Modified: llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp?rev=364513&r1=364512&r2=364513&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CallLowering.cpp Thu Jun 27 02:24:30 2019
@@ -192,8 +192,7 @@ struct OutgoingArgHandler : public CallL
void AArch64CallLowering::splitToValueTypes(
const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
- const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv,
- const SplitArgTy &PerformArgSplit) const {
+ const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv) const {
const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
LLVMContext &Ctx = OrigArg.Ty->getContext();
@@ -212,40 +211,20 @@ void AArch64CallLowering::splitToValueTy
return;
}
- if (OrigArg.Regs.size() > 1) {
- // Create one ArgInfo for each virtual register in the original ArgInfo.
- assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
-
- bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
- OrigArg.Ty, CallConv, false);
- for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
- Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
- SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags,
- OrigArg.IsFixed);
- if (NeedsRegBlock)
- SplitArgs.back().Flags.setInConsecutiveRegs();
- }
+ // Create one ArgInfo for each virtual register in the original ArgInfo.
+ assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
- SplitArgs.back().Flags.setInConsecutiveRegsLast();
- return;
- }
-
- unsigned FirstRegIdx = SplitArgs.size();
bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
OrigArg.Ty, CallConv, false);
- for (auto SplitVT : SplitVTs) {
- Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
- SplitArgs.push_back(
- ArgInfo{MRI.createGenericVirtualRegister(getLLTForType(*SplitTy, DL)),
- SplitTy, OrigArg.Flags, OrigArg.IsFixed});
+ for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
+ Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
+ SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags,
+ OrigArg.IsFixed);
if (NeedsRegBlock)
SplitArgs.back().Flags.setInConsecutiveRegs();
}
SplitArgs.back().Flags.setInConsecutiveRegsLast();
-
- for (unsigned i = 0; i < Offsets.size(); ++i)
- PerformArgSplit(SplitArgs[FirstRegIdx + i].Regs[0], Offsets[i] * 8);
}
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
@@ -349,10 +328,7 @@ bool AArch64CallLowering::lowerReturn(Ma
// Reset the arg flags after modifying CurVReg.
setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
}
- splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC,
- [&](unsigned Reg, uint64_t Offset) {
- MIRBuilder.buildExtract(Reg, CurVReg, Offset);
- });
+ splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC);
}
OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
@@ -385,10 +361,7 @@ bool AArch64CallLowering::lowerFormalArg
ArgInfo OrigArg{VRegs[i], Arg.getType()};
setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
- splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv(),
- [&](Register Reg, uint64_t Offset) {
- llvm_unreachable("Args should already be split");
- });
+ splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv());
++i;
}
@@ -441,10 +414,7 @@ bool AArch64CallLowering::lowerCall(Mach
SmallVector<ArgInfo, 8> SplitArgs;
for (auto &OrigArg : OrigArgs) {
- splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv,
- [&](Register Reg, uint64_t Offset) {
- llvm_unreachable("Call params should already be split");
- });
+ splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv);
// AAPCS requires that we zero-extend i1 to 8 bits by the caller.
if (OrigArg.Ty->isIntegerTy(1))
SplitArgs.back().Flags.setZExt();
@@ -500,11 +470,7 @@ bool AArch64CallLowering::lowerCall(Mach
if (!OrigRet.Ty->isVoidTy()) {
SplitArgs.clear();
- splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv(),
- [&](unsigned Reg, uint64_t Offset) {
- llvm_unreachable(
- "Call results should already be split");
- });
+ splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv());
CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
Modified: llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h?rev=364513&r1=364512&r2=364513&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CallLowering.h Thu Jun 27 02:24:30 2019
@@ -60,13 +60,10 @@ private:
using MemHandler =
std::function<void(MachineIRBuilder &, int, CCValAssign &)>;
- using SplitArgTy = std::function<void(unsigned, uint64_t)>;
-
void splitToValueTypes(const ArgInfo &OrigArgInfo,
SmallVectorImpl<ArgInfo> &SplitArgs,
const DataLayout &DL, MachineRegisterInfo &MRI,
- CallingConv::ID CallConv,
- const SplitArgTy &SplitArg) const;
+ CallingConv::ID CallConv) const;
};
} // end namespace llvm
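As a usage note (a sketch based only on the call sites visible in the hunks above, not on the surrounding file): each caller now hands the already-split virtual registers to the helper through OrigArg.Regs, so the per-piece callback and its llvm_unreachable stubs are gone and a call site reduces to:

  SmallVector<ArgInfo, 8> SplitArgs;
  splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv());

The remaining per-register work, pairing each register in OrigArg.Regs with its split type and flags, happens inside the helper itself.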