[llvm] 1778564 - [Alignment][NFC] Migrate the rest of backends
Guillaume Chatelet via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 8 00:21:41 PDT 2020
Author: Guillaume Chatelet
Date: 2020-06-08T07:17:20Z
New Revision: 1778564f9118e29628a76a9cfea87d76fa83f0e6
URL: https://github.com/llvm/llvm-project/commit/1778564f9118e29628a76a9cfea87d76fa83f0e6
DIFF: https://github.com/llvm/llvm-project/commit/1778564f9118e29628a76a9cfea87d76fa83f0e6.diff
LOG: [Alignment][NFC] Migrate the rest of backends
Summary: This is a follow-up to D81196
Reviewers: courbet
Subscribers: arsenm, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, nhaehnle, sbc100, jgravelle-google, hiraditya, aheejin, kbarton, fedor.sergeev, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, Jim, lenary, s.egerton, pzheng, sameer.abuasal, apazos, luismarques, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D81278
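For context: the patch replaces raw unsigned alignment arguments with the typed
llvm::Align wrapper, i.e. CCState::AllocateStack now takes an Align, and
DataLayout::getABITypeAlignment callers move to getABITypeAlign. Below is a
minimal self-contained sketch of the pattern, assuming only
llvm/Support/Alignment.h; MockCCState is a hypothetical stand-in for the real
CCState so the example compiles on its own:

    #include "llvm/Support/Alignment.h"
    #include <cstdint>

    using llvm::Align;

    // Hypothetical stand-in for CCState, mirroring the migrated
    // AllocateStack shape: the alignment parameter is a typed Align,
    // no longer an unsigned that is easy to confuse with the size.
    struct MockCCState {
      uint64_t NextOffset = 0;
      uint64_t AllocateStack(unsigned Size, Align Alignment) {
        // Round the running offset up to the requested alignment,
        // then reserve Size bytes.
        NextOffset = llvm::alignTo(NextOffset, Alignment);
        uint64_t Offset = NextOffset;
        NextOffset += Size;
        return Offset;
      }
    };

    int main() {
      MockCCState State;
      State.AllocateStack(4, Align(8));                // lands at offset 0
      uint64_t Off = State.AllocateStack(4, Align(4)); // lands at offset 4
      return Off == 4 ? 0 : 1;
    }

The benefit of the typed wrapper is that Align asserts at construction that its
value is a non-zero power of two, so an invalid alignment fails fast instead of
flowing silently into offset arithmetic.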
Added:
Modified:
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/lib/Target/ARC/ARCISelLowering.cpp
llvm/lib/Target/AVR/AVRISelLowering.cpp
llvm/lib/Target/Lanai/LanaiISelLowering.cpp
llvm/lib/Target/Mips/MipsCallLowering.cpp
llvm/lib/Target/Mips/MipsISelLowering.cpp
llvm/lib/Target/PowerPC/PPCFastISel.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/Sparc/SparcISelLowering.cpp
llvm/lib/Target/SystemZ/SystemZCallingConv.h
llvm/lib/Target/VE/VEISelLowering.cpp
llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
llvm/lib/Target/X86/X86CallingConv.cpp
llvm/lib/Target/X86/X86FastISel.cpp
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/lib/Target/XCore/XCoreISelLowering.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4e134e84b9b3..36955ec22a8b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1764,7 +1764,7 @@ static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
if (RegIdx == ArgVGPRs.size()) {
// Spill to stack required.
- int64_t Offset = CCInfo.AllocateStack(4, 4);
+ int64_t Offset = CCInfo.AllocateStack(4, Align(4));
return ArgDescriptor::createStack(Offset, Mask);
}
@@ -2596,7 +2596,8 @@ void SITargetLowering::passSpecialInputs(
if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
report_fatal_error("failed to allocate implicit input argument");
} else {
- unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
+ unsigned SpecialArgOffset =
+ CCInfo.AllocateStack(ArgVT.getStoreSize(), Align(4));
SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
SpecialArgOffset);
MemOpChains.push_back(ArgStore);
@@ -2663,7 +2664,7 @@ void SITargetLowering::passSpecialInputs(
RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
CCInfo.AllocateReg(OutgoingArg->getRegister());
} else {
- unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4);
+ unsigned SpecialArgOffset = CCInfo.AllocateStack(4, Align(4));
SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
SpecialArgOffset);
MemOpChains.push_back(ArgStore);
diff --git a/llvm/lib/Target/ARC/ARCISelLowering.cpp b/llvm/lib/Target/ARC/ARCISelLowering.cpp
index 04b1d4e29c3a..4a6510f10eeb 100644
--- a/llvm/lib/Target/ARC/ARCISelLowering.cpp
+++ b/llvm/lib/Target/ARC/ARCISelLowering.cpp
@@ -245,7 +245,7 @@ SDValue ARCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Analyze return values to determine the number of bytes of stack required.
CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
- RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
+ RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
RetCCInfo.AnalyzeCallResult(Ins, RetCC_ARC);
// Get a count of how many bytes are to be pushed on the stack.
@@ -622,7 +622,7 @@ ARCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
// Analyze return values.
if (!IsVarArg)
- CCInfo.AllocateStack(AFI->getReturnStackOffset(), 4);
+ CCInfo.AllocateStack(AFI->getReturnStackOffset(), Align(4));
CCInfo.AnalyzeReturn(Outs, RetCC_ARC);
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index 21363bbbf910..15efb62ebafb 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -993,8 +993,7 @@ static void analyzeStandardArguments(TargetLowering::CallLoweringInfo *CLI,
for (unsigned j = 0; j != Size; ++j) {
unsigned Offset = CCInfo.AllocateStack(
TD->getTypeAllocSize(EVT(LocVT).getTypeForEVT(CCInfo.getContext())),
- TD->getABITypeAlignment(
- EVT(LocVT).getTypeForEVT(CCInfo.getContext())));
+ TD->getABITypeAlign(EVT(LocVT).getTypeForEVT(CCInfo.getContext())));
CCInfo.addLoc(CCValAssign::getMem(ValNo++, LocVT, Offset, LocVT,
CCValAssign::Full));
}
diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
index 956753740a02..32ccf7172594 100644
--- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -388,7 +388,7 @@ static bool CC_Lanai32_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
}
// VarArgs get passed on stack
- unsigned Offset = State.AllocateStack(4, 4);
+ unsigned Offset = State.AllocateStack(4, Align(4));
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return false;
}
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index b81ba5439b9b..d7f02683abf2 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -451,7 +451,7 @@ bool MipsCallLowering::lowerFormalArguments(
static_cast<const MipsTargetMachine &>(MF.getTarget());
const MipsABIInfo &ABI = TM.getABI();
CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
- 1);
+ Align(1));
CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
setLocInfo(ArgLocs, Ins);
@@ -572,7 +572,8 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
MipsCCState CCInfo(F.getCallingConv(), IsCalleeVarArg, MF, ArgLocs,
F.getContext());
- CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
+ CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv),
+ Align(1));
const char *Call =
Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 3018d8d77c19..c51848912eb4 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2963,7 +2963,8 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
llvm_unreachable("Cannot handle this ValVT.");
if (!Reg) {
- unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
+ unsigned Offset =
+ State.AllocateStack(ValVT.getStoreSize(), Align(OrigAlign));
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
} else
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -3209,7 +3210,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// caller side but removing it breaks the frame size calculation.
unsigned ReservedArgArea =
MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
- CCInfo.AllocateStack(ReservedArgArea, 1);
+ CCInfo.AllocateStack(ReservedArgArea, Align(1));
CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
ES ? ES->getSymbol() : nullptr);
@@ -3631,7 +3632,7 @@ SDValue MipsTargetLowering::LowerFormalArguments(
SmallVector<CCValAssign, 16> ArgLocs;
MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
- CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
+ CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
const Function &Func = DAG.getMachineFunction().getFunction();
Function::const_arg_iterator FuncArg = Func.arg_begin();
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index 679948e40c68..6608fd359d9a 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -1384,7 +1384,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
// Reserve space for the linkage area on the stack.
unsigned LinkageSize = PPCSubTarget->getFrameLowering()->getLinkageSize();
- CCInfo.AllocateStack(LinkageSize, 8);
+ CCInfo.AllocateStack(LinkageSize, Align(8));
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index ddff7abdbd1f..1e878a4630a8 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3638,7 +3638,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// Potential tail calls could cause overwriting of argument stack slots.
bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
(CallConv == CallingConv::Fast));
- unsigned PtrByteSize = 4;
+ const Align PtrAlign(4);
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -3647,7 +3647,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// Reserve space for the linkage area on the stack.
unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
- CCInfo.AllocateStack(LinkageSize, PtrByteSize);
+ CCInfo.AllocateStack(LinkageSize, PtrAlign);
if (useSoftFloat())
CCInfo.PreAnalyzeFormalArguments(Ins);
@@ -3756,7 +3756,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
ByValArgLocs, *DAG.getContext());
// Reserve stack space for the allocations in CCInfo.
- CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
+ CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
@@ -5705,7 +5705,7 @@ SDValue PPCTargetLowering::LowerCall_32SVR4(
CallConv == CallingConv::Cold ||
CallConv == CallingConv::Fast) && "Unknown calling convention!");
- unsigned PtrByteSize = 4;
+ const Align PtrAlign(4);
MachineFunction &MF = DAG.getMachineFunction();
@@ -5728,7 +5728,7 @@ SDValue PPCTargetLowering::LowerCall_32SVR4(
// Reserve space for the linkage area on the stack.
CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
- PtrByteSize);
+ PtrAlign);
if (useSoftFloat())
CCInfo.PreAnalyzeCallOperands(Outs);
@@ -5770,7 +5770,7 @@ SDValue PPCTargetLowering::LowerCall_32SVR4(
CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
// Reserve stack space for the allocations in CCInfo.
- CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
+ CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
@@ -6985,7 +6985,7 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
State.getMachineFunction().getSubtarget());
const bool IsPPC64 = Subtarget.isPPC64();
- const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
+ const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
assert((!ValVT.isInteger() ||
@@ -7009,7 +7009,7 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
PPC::X7, PPC::X8, PPC::X9, PPC::X10};
if (ArgFlags.isByVal()) {
- if (ArgFlags.getNonZeroByValAlign() > PtrByteSize)
+ if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
report_fatal_error("Pass-by-value arguments with alignment greater than "
"register width are not supported.");
@@ -7024,10 +7024,10 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
return false;
}
- const unsigned StackSize = alignTo(ByValSize, PtrByteSize);
- unsigned Offset = State.AllocateStack(StackSize, PtrByteSize);
+ const unsigned StackSize = alignTo(ByValSize, PtrAlign);
+ unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
for (const unsigned E = Offset + StackSize; Offset < E;
- Offset += PtrByteSize) {
+ Offset += PtrAlign.value()) {
if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
else {
@@ -7050,7 +7050,7 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
LLVM_FALLTHROUGH;
case MVT::i1:
case MVT::i32: {
- const unsigned Offset = State.AllocateStack(PtrByteSize, PtrByteSize);
+ const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
// AIX integer arguments are always passed in register width.
if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
@@ -7068,13 +7068,14 @@ static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
const unsigned StoreSize = LocVT.getStoreSize();
// Floats are always 4-byte aligned in the PSA on AIX.
// This includes f64 in 64-bit mode for ABI compatibility.
- const unsigned Offset = State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4);
+ const unsigned Offset =
+ State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
unsigned FReg = State.AllocateReg(FPR);
if (FReg)
State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
// Reserve and initialize GPRs or initialize the PSA as required.
- for (unsigned I = 0; I < StoreSize; I += PtrByteSize) {
+ for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
assert(FReg && "An FPR should be available when a GPR is reserved.");
if (State.isVarArg()) {
@@ -7191,7 +7192,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
const EVT PtrVT = getPointerTy(MF.getDataLayout());
// Reserve space for the linkage area on the stack.
const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
- CCInfo.AllocateStack(LinkageSize, PtrByteSize);
+ CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
SmallVector<SDValue, 8> MemOps;
@@ -7413,7 +7414,7 @@ SDValue PPCTargetLowering::LowerCall_AIX(
const bool IsPPC64 = Subtarget.isPPC64();
const EVT PtrVT = getPointerTy(DAG.getDataLayout());
const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
- CCInfo.AllocateStack(LinkageSize, PtrByteSize);
+ CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
// The prolog code of the callee may store up to 8 GPR argument registers to
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b6e9d9f09e12..7c0b1dd6e382 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1461,11 +1461,11 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
State.addLoc(
CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
- State.AllocateStack(XLenInBytes, StackAlign),
+ State.AllocateStack(XLenInBytes, Align(StackAlign)),
VA1.getLocVT(), CCValAssign::Full));
State.addLoc(CCValAssign::getMem(
- ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
- CCValAssign::Full));
+ ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
+ LocVT2, CCValAssign::Full));
return false;
}
@@ -1476,8 +1476,8 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
} else {
// The second half is passed via the stack, without additional alignment.
State.addLoc(CCValAssign::getMem(
- ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
- CCValAssign::Full));
+ ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
+ LocVT2, CCValAssign::Full));
}
return false;
@@ -1572,13 +1572,13 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
Register Reg = State.AllocateReg(ArgGPRs);
LocVT = MVT::i32;
if (!Reg) {
- unsigned StackOffset = State.AllocateStack(8, 8);
+ unsigned StackOffset = State.AllocateStack(8, Align(8));
State.addLoc(
CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
return false;
}
if (!State.AllocateReg(ArgGPRs))
- State.AllocateStack(4, 4);
+ State.AllocateStack(4, Align(4));
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
}
@@ -1618,7 +1618,8 @@ static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
else
Reg = State.AllocateReg(ArgGPRs);
- unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);
+ unsigned StackOffset =
+ Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
// If we reach this point and PendingLocs is non-empty, we must be at the
// end of a split argument that must be passed indirectly.
@@ -1887,13 +1888,13 @@ static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
}
if (LocVT == MVT::i32 || LocVT == MVT::f32) {
- unsigned Offset4 = State.AllocateStack(4, 4);
+ unsigned Offset4 = State.AllocateStack(4, Align(4));
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
return false;
}
if (LocVT == MVT::i64 || LocVT == MVT::f64) {
- unsigned Offset5 = State.AllocateStack(8, 8);
+ unsigned Offset5 = State.AllocateStack(8, Align(8));
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
return false;
}
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 22134437ff46..f598d4233ce9 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -63,9 +63,8 @@ static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
} else {
// Assign whole thing in stack.
- State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
- State.AllocateStack(8,4),
- LocVT, LocInfo));
+ State.addLoc(CCValAssign::getCustomMem(
+ ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
return true;
}
@@ -73,9 +72,8 @@ static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
if (unsigned Reg = State.AllocateReg(RegList))
State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
else
- State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
- State.AllocateStack(4,4),
- LocVT, LocInfo));
+ State.addLoc(CCValAssign::getCustomMem(
+ ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
return true;
}
@@ -112,7 +110,7 @@ static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
// Stack space is allocated for all arguments starting from [%fp+BIAS+128].
unsigned size = (LocVT == MVT::f128) ? 16 : 8;
- unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
+ Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
unsigned Offset = State.AllocateStack(size, alignment);
unsigned Reg = 0;
@@ -152,7 +150,7 @@ static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
MVT &LocVT, CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags, CCState &State) {
assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
- unsigned Offset = State.AllocateStack(4, 4);
+ unsigned Offset = State.AllocateStack(4, Align(4));
if (LocVT == MVT::f32 && Offset < 16*8) {
// Promote floats to %f0-%f31.
diff --git a/llvm/lib/Target/SystemZ/SystemZCallingConv.h b/llvm/lib/Target/SystemZ/SystemZCallingConv.h
index 4432adc6a269..d4c7ce07420b 100644
--- a/llvm/lib/Target/SystemZ/SystemZCallingConv.h
+++ b/llvm/lib/Target/SystemZ/SystemZCallingConv.h
@@ -108,7 +108,7 @@ inline bool CC_SystemZ_I128Indirect(unsigned &ValNo, MVT &ValVT,
// the location (register or stack slot) for the indirect pointer.
// (This duplicates the usual i64 calling convention rules.)
unsigned Reg = State.AllocateReg(SystemZ::ArgGPRs);
- unsigned Offset = Reg ? 0 : State.AllocateStack(8, 8);
+ unsigned Offset = Reg ? 0 : State.AllocateStack(8, Align(8));
// Use that same location for all the pending parts.
for (auto &It : PendingMembers) {
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index cbdf861307b3..6648d3928c1f 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -49,9 +49,9 @@ static bool allocateFloat(unsigned ValNo, MVT ValVT, MVT LocVT,
// | empty| float|
// +------+------+
// Use align=8 for dummy area to align the beginning of these 2 area.
- State.AllocateStack(4, 8); // for empty area
+ State.AllocateStack(4, Align(8)); // for empty area
// Use align=4 for value to place it at just after the dummy area.
- unsigned Offset = State.AllocateStack(4, 4); // for float value area
+ unsigned Offset = State.AllocateStack(4, Align(4)); // for float value area
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
return true;
}
@@ -147,7 +147,7 @@ SDValue VETargetLowering::LowerFormalArguments(
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
// Allocate the preserved area first.
- CCInfo.AllocateStack(ArgsPreserved, 8);
+ CCInfo.AllocateStack(ArgsPreserved, Align(8));
// We already allocated the preserved area, so the stack offset computed
// by CC_VE would be correct now.
CCInfo.AnalyzeFormalArguments(Ins, CC_VE);
@@ -267,7 +267,7 @@ SDValue VETargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
*DAG.getContext());
// Allocate the preserved area first.
- CCInfo.AllocateStack(ArgsPreserved, 8);
+ CCInfo.AllocateStack(ArgsPreserved, Align(8));
// We already allocated the preserved area, so the stack offset computed
// by CC_VE would be correct now.
CCInfo.AnalyzeCallOperands(CLI.Outs, CC_VE);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index f33a63b105a5..4c71137d5674 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -840,10 +840,10 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
EVT VT = Arg.getValueType();
assert(VT != MVT::iPTR && "Legalized args should be concrete");
Type *Ty = VT.getTypeForEVT(*DAG.getContext());
- unsigned Align = std::max(Out.Flags.getOrigAlign(),
- Layout.getABITypeAlignment(Ty));
- unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
- Align);
+ Align Alignment =
+ std::max(Align(Out.Flags.getOrigAlign()), Layout.getABITypeAlign(Ty));
+ unsigned Offset =
+ CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
Offset, VT.getSimpleVT(),
CCValAssign::Full));
diff --git a/llvm/lib/Target/X86/X86CallingConv.cpp b/llvm/lib/Target/X86/X86CallingConv.cpp
index baf835ac58e6..c899db60e016 100644
--- a/llvm/lib/Target/X86/X86CallingConv.cpp
+++ b/llvm/lib/Target/X86/X86CallingConv.cpp
@@ -166,7 +166,7 @@ static bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
State.getMachineFunction().getSubtarget().getRegisterInfo();
if (TRI->regsOverlap(Reg, X86::XMM4) ||
TRI->regsOverlap(Reg, X86::XMM5))
- State.AllocateStack(8, 8);
+ State.AllocateStack(8, Align(8));
if (!ArgFlags.isHva()) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -281,7 +281,7 @@ static bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
if (UseRegs)
It.convertToReg(State.AllocateReg(RegList[FirstFree++]));
else
- It.convertToMem(State.AllocateStack(4, 4));
+ It.convertToMem(State.AllocateStack(4, Align(4)));
State.addLoc(It);
}
@@ -305,7 +305,7 @@ static bool CC_X86_Intr(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
if (ArgCount == 1 && ValNo == 0) {
// If we have one argument, the argument is five stack slots big, at fixed
// offset zero.
- Offset = State.AllocateStack(5 * SlotSize, 4);
+ Offset = State.AllocateStack(5 * SlotSize, Align(4));
} else if (ArgCount == 2 && ValNo == 0) {
// If we have two arguments, the stack slot is *after* the error code
// argument. Pretend it doesn't consume stack space, and account for it when
@@ -316,7 +316,7 @@ static bool CC_X86_Intr(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
// appears first on the stack, and is then followed by the five slot
// interrupt struct.
Offset = 0;
- (void)State.AllocateStack(6 * SlotSize, 4);
+ (void)State.AllocateStack(6 * SlotSize, Align(4));
} else {
report_fatal_error("unsupported x86 interrupt prototype");
}
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index c5ae67fe081d..74b4b7bb78c4 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3304,7 +3304,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
// Allocate shadow area for Win64
if (IsWin64)
- CCInfo.AllocateStack(32, 8);
+ CCInfo.AllocateStack(32, Align(8));
CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3bba9212b893..b194423a8371 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3559,7 +3559,7 @@ SDValue X86TargetLowering::LowerFormalArguments(
// Allocate shadow area for Win64.
if (IsWin64)
- CCInfo.AllocateStack(32, 8);
+ CCInfo.AllocateStack(32, Align(8));
CCInfo.AnalyzeArguments(Ins, CC_X86);
@@ -3898,7 +3898,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Allocate shadow area for Win64.
if (IsWin64)
- CCInfo.AllocateStack(32, 8);
+ CCInfo.AllocateStack(32, Align(8));
CCInfo.AnalyzeArguments(Outs, CC_X86);
@@ -4631,7 +4631,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(
// Allocate shadow area for Win64
if (IsCalleeWin64)
- CCInfo.AllocateStack(32, 8);
+ CCInfo.AllocateStack(32, Align(8));
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
StackArgsSize = CCInfo.getNextStackOffset();
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index d6b9d9b2f8bd..d8087282ff5d 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1119,7 +1119,7 @@ SDValue XCoreTargetLowering::LowerCCCCallTo(
// The ABI dictates there should be one stack slot available to the callee
// on function entry (for saving lr).
- CCInfo.AllocateStack(4, 4);
+ CCInfo.AllocateStack(4, Align(4));
CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
@@ -1127,7 +1127,7 @@ SDValue XCoreTargetLowering::LowerCCCCallTo(
// Analyze return values to determine the number of bytes of stack required.
CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
- RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
+ RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
// Get a count of how many bytes are to be pushed on the stack.
@@ -1455,7 +1455,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
// Analyze return values.
if (!isVarArg)
- CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
+ CCInfo.AllocateStack(XFI->getReturnStackOffset(), Align(4));
CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
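As the CC_AIX hunk above illustrates, arithmetic on a typed alignment goes
through helpers rather than implicit integer conversion: alignTo(ByValSize,
PtrAlign) rounds a size up to the alignment, and PtrAlign.value() recovers the
raw byte count where plain integer math is still needed. A small sketch with
illustrative values, assuming llvm/Support/Alignment.h:

    #include "llvm/Support/Alignment.h"
    #include <cstdint>

    using llvm::Align;

    int main() {
      const Align PtrAlign(8);
      // Round a 13-byte by-value argument up to a multiple of the
      // pointer alignment, as in alignTo(ByValSize, PtrAlign).
      uint64_t StackSize = llvm::alignTo(/*ByValSize=*/13, PtrAlign); // 16
      // Recover the raw byte count for plain offset stepping, as in
      // Offset += PtrAlign.value().
      uint64_t Step = PtrAlign.value(); // 8
      return (StackSize == 16 && Step == 8) ? 0 : 1;
    }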