[llvm] [DataLayout] Change return type of `getStackAlignment` to `MaybeAlign` (PR #105478)
Sergei Barannikov via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 21 05:55:51 PDT 2024
https://github.com/s-barannikov updated https://github.com/llvm/llvm-project/pull/105478
From ca9e2658c3c3f4d3abf525d6b8ce653a09e22eef Mon Sep 17 00:00:00 2001
From: Sergei Barannikov <barannikov88 at gmail.com>
Date: Wed, 21 Aug 2024 10:57:45 +0300
Subject: [PATCH 1/3] [DataLayout] Change return type of `getStackAlignment` to
`MaybeAlign`
Currently, `getStackAlignment` asserts if the stack alignment wasn't
specified. This makes it inconvenient to use and complicates testing.
This change also makes the `exceedsNaturalStackAlignment` method redundant.
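Callers now check the returned MaybeAlign before dereferencing it. A minimal
sketch of the updated calling pattern, modeled on the ExpandVariadics change
below (the function name chooseAllocaAlign is hypothetical and not part of
this patch):

    // Pick an alloca alignment, honoring the natural stack alignment when
    // the data layout string specifies one ("S<bits>").
    Align chooseAllocaAlign(const DataLayout &DL, Align MaxFieldAlign) {
      Align AllocaAlign = MaxFieldAlign;
      // getStackAlignment() now returns an empty MaybeAlign instead of
      // asserting when no stack alignment was specified.
      if (MaybeAlign StackAlign = DL.getStackAlignment();
          StackAlign && *StackAlign > AllocaAlign)
        AllocaAlign = *StackAlign;
      return AllocaAlign;
    }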
---
llvm/include/llvm/IR/DataLayout.h | 12 +++---------
llvm/lib/Transforms/IPO/ExpandVariadics.cpp | 6 +++---
llvm/lib/Transforms/Utils/Local.cpp | 3 ++-
llvm/unittests/IR/DataLayoutTest.cpp | 15 +++++++++++++++
4 files changed, 23 insertions(+), 13 deletions(-)
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
index 2f06bda6c30a51..145f1a29c7dfb7 100644
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -220,15 +220,9 @@ class DataLayout {
bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }
- /// Returns true if the given alignment exceeds the natural stack alignment.
- bool exceedsNaturalStackAlignment(Align Alignment) const {
- return StackNaturalAlign && (Alignment > *StackNaturalAlign);
- }
-
- Align getStackAlignment() const {
- assert(StackNaturalAlign && "StackNaturalAlign must be defined");
- return *StackNaturalAlign;
- }
+ /// Returns the natural stack alignment, or MaybeAlign() if one wasn't
+ /// specified.
+ MaybeAlign getStackAlignment() const { return StackNaturalAlign; }
unsigned getAllocaAddrSpace() const { return AllocaAddrSpace; }
diff --git a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
index 49bfec297bc173..a7a01ca1055dd3 100644
--- a/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
+++ b/llvm/lib/Transforms/IPO/ExpandVariadics.cpp
@@ -748,10 +748,10 @@ bool ExpandVariadics::expandCall(Module &M, IRBuilder<> &Builder, CallBase *CB,
// This is an awkward way to guess whether there is a known stack alignment
// without hitting an assert in DL.getStackAlignment, 1024 is an arbitrary
// number likely to be greater than the natural stack alignment.
- // TODO: DL.getStackAlignment could return a MaybeAlign instead of assert
Align AllocaAlign = MaxFieldAlign;
- if (DL.exceedsNaturalStackAlignment(Align(1024)))
- AllocaAlign = std::max(AllocaAlign, DL.getStackAlignment());
+ if (MaybeAlign StackAlign = DL.getStackAlignment();
+ StackAlign && *StackAlign > AllocaAlign)
+ AllocaAlign = *StackAlign;
// Put the alloca to hold the variadic args in the entry basic block.
Builder.SetInsertPointPastAllocas(CBF);
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index efb02fdec56d7e..df0924dd8e4208 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1506,7 +1506,8 @@ Align llvm::tryEnforceAlignment(Value *V, Align PrefAlign,
// If the preferred alignment is greater than the natural stack alignment
// then don't round up. This avoids dynamic stack realignment.
- if (DL.exceedsNaturalStackAlignment(PrefAlign))
+ MaybeAlign StackAlign = DL.getStackAlignment();
+ if (StackAlign && PrefAlign > *StackAlign)
return CurrentAlign;
AI->setAlignment(PrefAlign);
return PrefAlign;
diff --git a/llvm/unittests/IR/DataLayoutTest.cpp b/llvm/unittests/IR/DataLayoutTest.cpp
index 396d44af19f53f..16a603ff6416f4 100644
--- a/llvm/unittests/IR/DataLayoutTest.cpp
+++ b/llvm/unittests/IR/DataLayoutTest.cpp
@@ -444,6 +444,21 @@ TEST(DataLayout, ParseNonIntegralAddrSpace) {
FailedWithMessage("address space 0 cannot be non-integral"));
}
+TEST(DataLayout, GetStackAlignment) {
+ DataLayout Default;
+ EXPECT_FALSE(Default.getStackAlignment().has_value());
+
+ std::pair<StringRef, Align> Cases[] = {
+ {"S8", Align(1)},
+ {"S64", Align(8)},
+ {"S32768", Align(4096)},
+ };
+ for (auto [Layout, Val] : Cases) {
+ DataLayout DL = cantFail(DataLayout::parse(Layout));
+ EXPECT_EQ(DL.getStackAlignment(), Val) << Layout;
+ }
+}
+
TEST(DataLayout, GetPointerSizeInBits) {
std::tuple<StringRef, unsigned, unsigned, unsigned> Cases[] = {
{"", 64, 64, 64},
From 307bcb7e2d6c7a8281f17e280a56d069241db738 Mon Sep 17 00:00:00 2001
From: Sergei Barannikov <barannikov88 at gmail.com>
Date: Wed, 21 Aug 2024 15:19:22 +0300
Subject: [PATCH 2/3] Fix remaining uses
---
llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp | 8 ++++----
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 12 ++++++------
llvm/lib/Target/AArch64/AArch64CallingConvention.cpp | 2 +-
llvm/lib/Target/ARM/ARMCallingConv.cpp | 2 +-
llvm/lib/Target/ARM/ARMISelLowering.cpp | 6 +++---
.../Target/WebAssembly/WebAssemblyISelLowering.cpp | 2 +-
6 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index bdbef20e20960d..d455de6df03238 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -9134,8 +9134,8 @@ LegalizerHelper::lowerMemcpy(MachineInstr &MI, Register Dst, Register Src,
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->hasStackRealignment(MF))
- while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
- NewAlign = NewAlign.previous();
+ if (MaybeAlign StackAlign = DL.getStackAlignment())
+ NewAlign = std::min(NewAlign, *StackAlign);
if (NewAlign > Alignment) {
Alignment = NewAlign;
@@ -9242,8 +9242,8 @@ LegalizerHelper::lowerMemmove(MachineInstr &MI, Register Dst, Register Src,
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->hasStackRealignment(MF))
- while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
- NewAlign = NewAlign.previous();
+ if (MaybeAlign StackAlign = DL.getStackAlignment())
+ NewAlign = std::min(NewAlign, *StackAlign);
if (NewAlign > Alignment) {
Alignment = NewAlign;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 18a3b7bce104a7..4ff70238e28d80 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7894,8 +7894,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
// optimization.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->hasStackRealignment(MF))
- while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
- NewAlign = NewAlign.previous();
+ if (MaybeAlign StackAlign = DL.getStackAlignment())
+ NewAlign = std::min(NewAlign, *StackAlign);
if (NewAlign > Alignment) {
// Give the stack frame object a larger alignment if needed.
@@ -8089,8 +8089,8 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
// optimization.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->hasStackRealignment(MF))
- while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
- NewAlign = NewAlign.previous();
+ if (MaybeAlign StackAlign = DL.getStackAlignment())
+ NewAlign = std::min(NewAlign, *StackAlign);
if (NewAlign > Alignment) {
// Give the stack frame object a larger alignment if needed.
@@ -8207,8 +8207,8 @@ static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
// optimization.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->hasStackRealignment(MF))
- while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
- NewAlign = NewAlign.previous();
+ if (MaybeAlign StackAlign = DL.getStackAlignment())
+ NewAlign = std::min(NewAlign, *StackAlign);
if (NewAlign > Alignment) {
// Give the stack frame object a larger alignment if needed.
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
index 9a804c12939c4b..85ec4745904652 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -210,7 +210,7 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
}
const Align StackAlign =
- State.getMachineFunction().getDataLayout().getStackAlignment();
+ *State.getMachineFunction().getDataLayout().getStackAlignment();
const Align MemAlign = ArgFlags.getNonZeroMemAlign();
Align SlotAlign = std::min(MemAlign, StackAlign);
if (!Subtarget.isTargetDarwin())
diff --git a/llvm/lib/Target/ARM/ARMCallingConv.cpp b/llvm/lib/Target/ARM/ARMCallingConv.cpp
index 4878c73138940d..aafa24fad2d81d 100644
--- a/llvm/lib/Target/ARM/ARMCallingConv.cpp
+++ b/llvm/lib/Target/ARM/ARMCallingConv.cpp
@@ -190,7 +190,7 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned ValNo, MVT ValVT,
// Try to allocate a contiguous block of registers, each of the correct
// size to hold one member.
auto &DL = State.getMachineFunction().getDataLayout();
- const Align StackAlign = DL.getStackAlignment();
+ const Align StackAlign = *DL.getStackAlignment();
const Align FirstMemberAlign(PendingMembers[0].getExtraInfo());
Align Alignment = std::min(FirstMemberAlign, StackAlign);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 1e8bb8a495e68b..cd3259d752b298 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2461,7 +2461,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Since callee will pop argument stack as a tail call, we must keep the
// popped size 16-byte aligned.
- Align StackAlign = DAG.getDataLayout().getStackAlignment();
+ Align StackAlign = *DAG.getDataLayout().getStackAlignment();
NumBytes = alignTo(NumBytes, StackAlign);
// SPDiff will be negative if this tail call requires more space than we
@@ -4712,7 +4712,7 @@ SDValue ARMTargetLowering::LowerFormalArguments(
// The only way to guarantee a tail call is if the callee restores its
// argument area, but it must also keep the stack aligned when doing so.
const DataLayout &DL = DAG.getDataLayout();
- StackArgSize = alignTo(StackArgSize, DL.getStackAlignment());
+ StackArgSize = alignTo(StackArgSize, *DL.getStackAlignment());
AFI->setArgumentStackToRestore(StackArgSize);
}
@@ -22030,7 +22030,7 @@ Align ARMTargetLowering::getABIAlignmentForCallingConv(
// Avoid over-aligning vector parameters. It would require realigning the
// stack and waste space for no real benefit.
- return std::min(ABITypeAlign, DL.getStackAlignment());
+ return std::min(ABITypeAlign, *DL.getStackAlignment());
}
/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 563601b722c803..b28d3ec034ec7f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1190,7 +1190,7 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
// For non-fixed arguments, next emit stores to store the argument values
// to the stack buffer at the offsets computed above.
int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
- Layout.getStackAlignment(),
+ *Layout.getStackAlignment(),
/*isSS=*/false);
unsigned ValNo = 0;
SmallVector<SDValue, 8> Chains;
From 818641d6b314b447ca72dcb8fedf01ea62dedb0a Mon Sep 17 00:00:00 2001
From: Sergei Barannikov <barannikov88 at gmail.com>
Date: Wed, 21 Aug 2024 15:55:34 +0300
Subject: [PATCH 3/3] Check that MaybeAlign contains a value before accessing
it
---
.../Target/AArch64/AArch64CallingConvention.cpp | 7 ++++---
llvm/lib/Target/ARM/ARMCallingConv.cpp | 5 +++--
llvm/lib/Target/ARM/ARMISelLowering.cpp | 14 +++++++++-----
.../Target/WebAssembly/WebAssemblyISelLowering.cpp | 5 +++--
4 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
index 85ec4745904652..1eb34e7ca4ddc6 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -209,10 +209,11 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
State.AllocateReg(Reg);
}
- const Align StackAlign =
- *State.getMachineFunction().getDataLayout().getStackAlignment();
+ const MaybeAlign StackAlign =
+ State.getMachineFunction().getDataLayout().getStackAlignment();
+ assert(StackAlign && "data layout string is missing stack alignment");
const Align MemAlign = ArgFlags.getNonZeroMemAlign();
- Align SlotAlign = std::min(MemAlign, StackAlign);
+ Align SlotAlign = std::min(MemAlign, *StackAlign);
if (!Subtarget.isTargetDarwin())
SlotAlign = std::max(SlotAlign, Align(8));
diff --git a/llvm/lib/Target/ARM/ARMCallingConv.cpp b/llvm/lib/Target/ARM/ARMCallingConv.cpp
index aafa24fad2d81d..f7a3ef8b314aba 100644
--- a/llvm/lib/Target/ARM/ARMCallingConv.cpp
+++ b/llvm/lib/Target/ARM/ARMCallingConv.cpp
@@ -190,9 +190,10 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned ValNo, MVT ValVT,
// Try to allocate a contiguous block of registers, each of the correct
// size to hold one member.
auto &DL = State.getMachineFunction().getDataLayout();
- const Align StackAlign = *DL.getStackAlignment();
+ const MaybeAlign StackAlign = DL.getStackAlignment();
+ assert(StackAlign && "data layout string is missing stack alignment");
const Align FirstMemberAlign(PendingMembers[0].getExtraInfo());
- Align Alignment = std::min(FirstMemberAlign, StackAlign);
+ Align Alignment = std::min(FirstMemberAlign, *StackAlign);
ArrayRef<MCPhysReg> RegList;
switch (LocVT.SimpleTy) {
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index cd3259d752b298..827867dc011126 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2461,8 +2461,9 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Since callee will pop argument stack as a tail call, we must keep the
// popped size 16-byte aligned.
- Align StackAlign = *DAG.getDataLayout().getStackAlignment();
- NumBytes = alignTo(NumBytes, StackAlign);
+ MaybeAlign StackAlign = DAG.getDataLayout().getStackAlignment();
+ assert(StackAlign && "data layout string is missing stack alignment");
+ NumBytes = alignTo(NumBytes, *StackAlign);
// SPDiff will be negative if this tail call requires more space than we
// would automatically have in our incoming argument space. Positive if we
@@ -4711,8 +4712,9 @@ SDValue ARMTargetLowering::LowerFormalArguments(
if (canGuaranteeTCO(CallConv, TailCallOpt)) {
// The only way to guarantee a tail call is if the callee restores its
// argument area, but it must also keep the stack aligned when doing so.
- const DataLayout &DL = DAG.getDataLayout();
- StackArgSize = alignTo(StackArgSize, *DL.getStackAlignment());
+ MaybeAlign StackAlign = DAG.getDataLayout().getStackAlignment();
+ assert(StackAlign && "data layout string is missing stack alignment");
+ StackArgSize = alignTo(StackArgSize, *StackAlign);
AFI->setArgumentStackToRestore(StackArgSize);
}
@@ -22030,7 +22032,9 @@ Align ARMTargetLowering::getABIAlignmentForCallingConv(
// Avoid over-aligning vector parameters. It would require realigning the
// stack and waste space for no real benefit.
- return std::min(ABITypeAlign, *DL.getStackAlignment());
+ MaybeAlign StackAlign = DL.getStackAlignment();
+ assert(StackAlign && "data layout string is missing stack alignment");
+ return std::min(ABITypeAlign, *StackAlign);
}
/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index b28d3ec034ec7f..6f2af7fa3c1598 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1189,8 +1189,9 @@ WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
if (IsVarArg && NumBytes) {
// For non-fixed arguments, next emit stores to store the argument values
// to the stack buffer at the offsets computed above.
- int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
- *Layout.getStackAlignment(),
+ MaybeAlign StackAlign = Layout.getStackAlignment();
+ assert(StackAlign && "data layout string is missing stack alignment");
+ int FI = MF.getFrameInfo().CreateStackObject(NumBytes, *StackAlign,
/*isSS=*/false);
unsigned ValNo = 0;
SmallVector<SDValue, 8> Chains;