[llvm] r373081 - [Alignment][NFC] Remove unneeded llvm:: scoping on Align types
Guillaume Chatelet via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 27 05:54:22 PDT 2019
Author: gchatelet
Date: Fri Sep 27 05:54:21 2019
New Revision: 373081
URL: http://llvm.org/viewvc/llvm-project?rev=373081&view=rev
Log:
[Alignment][NFC] Remove unneeded llvm:: scoping on Align types
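
For readers skimming the diff below, a minimal sketch of the pattern being cleaned up. The function name here is made up for illustration; only the Align/MaybeAlign types and the llvm/Support/Alignment.h header come from the patch, and the change is purely cosmetic (NFC), since both spellings name the same type.

  // Minimal sketch, assuming a hypothetical declaration inside namespace llvm.
  #include "llvm/Support/Alignment.h" // defines llvm::Align and llvm::MaybeAlign

  namespace llvm {

  // Before: legal, but the llvm:: qualifier is redundant inside namespace llvm.
  void setBlockAlignment(llvm::Align Alignment);

  // After: unqualified lookup finds the same type, so behavior is unchanged.
  void setBlockAlignment(Align Alignment);

  } // namespace llvm
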
Modified:
llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h
llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h
llvm/trunk/include/llvm/CodeGen/AsmPrinter.h
llvm/trunk/include/llvm/CodeGen/CallingConvLower.h
llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h
llvm/trunk/include/llvm/CodeGen/MachineFrameInfo.h
llvm/trunk/include/llvm/CodeGen/MachineFunction.h
llvm/trunk/include/llvm/CodeGen/TargetCallingConv.h
llvm/trunk/include/llvm/CodeGen/TargetLowering.h
llvm/trunk/include/llvm/IR/DataLayout.h
llvm/trunk/include/llvm/IR/Instructions.h
llvm/trunk/include/llvm/MC/MCSection.h
llvm/trunk/include/llvm/Support/Alignment.h
llvm/trunk/include/llvm/Support/OnDiskHashTable.h
llvm/trunk/lib/Analysis/MemoryBuiltins.cpp
llvm/trunk/lib/Analysis/TargetTransformInfo.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/EHStreamer.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
llvm/trunk/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
llvm/trunk/lib/CodeGen/BranchRelaxation.cpp
llvm/trunk/lib/CodeGen/CallingConvLower.cpp
llvm/trunk/lib/CodeGen/GlobalISel/CombinerHelper.cpp
llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp
llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp
llvm/trunk/lib/CodeGen/MIRPrinter.cpp
llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp
llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
llvm/trunk/lib/CodeGen/MachineFrameInfo.cpp
llvm/trunk/lib/CodeGen/MachineFunction.cpp
llvm/trunk/lib/CodeGen/PatchableFunction.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/trunk/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
llvm/trunk/lib/IR/DataLayout.cpp
llvm/trunk/lib/IR/Instructions.cpp
llvm/trunk/lib/IR/Value.cpp
llvm/trunk/lib/MC/ELFObjectWriter.cpp
llvm/trunk/lib/MC/MCAssembler.cpp
llvm/trunk/lib/MC/MCELFStreamer.cpp
llvm/trunk/lib/MC/MCObjectStreamer.cpp
llvm/trunk/lib/MC/MCWinCOFFStreamer.cpp
llvm/trunk/lib/MC/MachObjectWriter.cpp
llvm/trunk/lib/Object/ArchiveWriter.cpp
llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp
llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
llvm/trunk/lib/Target/AMDGPU/R600AsmPrinter.cpp
llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
llvm/trunk/lib/Target/ARC/ARCMachineFunctionInfo.h
llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp
llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.cpp
llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.h
llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp
llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp
llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp
llvm/trunk/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp
llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
llvm/trunk/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp
llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp
llvm/trunk/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp
llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp
llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp
llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp
llvm/trunk/lib/Target/PowerPC/PPCBranchSelector.cpp
llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h
llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp
llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
llvm/trunk/lib/Target/SystemZ/SystemZLongBranch.cpp
llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86RetpolineThunks.cpp
llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h
llvm/trunk/lib/Target/XCore/XCoreAsmPrinter.cpp
llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp
llvm/trunk/lib/Transforms/Utils/Local.cpp
llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
llvm/trunk/tools/dsymutil/DwarfStreamer.cpp
llvm/trunk/tools/llvm-cov/TestingSupport.cpp
llvm/trunk/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp
Modified: llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h (original)
+++ llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h Fri Sep 27 05:54:21 2019
@@ -580,9 +580,9 @@ public:
bool isLegalMaskedLoad(Type *DataType) const;
/// Return true if the target supports nontemporal store.
- bool isLegalNTStore(Type *DataType, llvm::Align Alignment) const;
+ bool isLegalNTStore(Type *DataType, Align Alignment) const;
/// Return true if the target supports nontemporal load.
- bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) const;
+ bool isLegalNTLoad(Type *DataType, Align Alignment) const;
/// Return true if the target supports masked scatter.
bool isLegalMaskedScatter(Type *DataType) const;
@@ -1196,8 +1196,8 @@ public:
virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
virtual bool isLegalMaskedStore(Type *DataType) = 0;
virtual bool isLegalMaskedLoad(Type *DataType) = 0;
- virtual bool isLegalNTStore(Type *DataType, llvm::Align Alignment) = 0;
- virtual bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) = 0;
+ virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
+ virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
virtual bool isLegalMaskedScatter(Type *DataType) = 0;
virtual bool isLegalMaskedGather(Type *DataType) = 0;
virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
@@ -1471,10 +1471,10 @@ public:
bool isLegalMaskedLoad(Type *DataType) override {
return Impl.isLegalMaskedLoad(DataType);
}
- bool isLegalNTStore(Type *DataType, llvm::Align Alignment) override {
+ bool isLegalNTStore(Type *DataType, Align Alignment) override {
return Impl.isLegalNTStore(DataType, Alignment);
}
- bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) override {
+ bool isLegalNTLoad(Type *DataType, Align Alignment) override {
return Impl.isLegalNTLoad(DataType, Alignment);
}
bool isLegalMaskedScatter(Type *DataType) override {
Modified: llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h (original)
+++ llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h Fri Sep 27 05:54:21 2019
@@ -247,14 +247,14 @@ public:
bool isLegalMaskedLoad(Type *DataType) { return false; }
- bool isLegalNTStore(Type *DataType, llvm::Align Alignment) {
+ bool isLegalNTStore(Type *DataType, Align Alignment) {
// By default, assume nontemporal memory stores are available for stores
// that are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
return Alignment >= DataSize && isPowerOf2_32(DataSize);
}
- bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
+ bool isLegalNTLoad(Type *DataType, Align Alignment) {
// By default, assume nontemporal memory loads are available for loads that
// are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
Modified: llvm/trunk/include/llvm/CodeGen/AsmPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/AsmPrinter.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/AsmPrinter.h (original)
+++ llvm/trunk/include/llvm/CodeGen/AsmPrinter.h Fri Sep 27 05:54:21 2019
@@ -350,7 +350,7 @@ public:
/// global value is specified, and if that global has an explicit alignment
/// requested, it will override the alignment request if required for
/// correctness.
- void EmitAlignment(llvm::Align Align, const GlobalObject *GV = nullptr) const;
+ void EmitAlignment(Align Alignment, const GlobalObject *GV = nullptr) const;
/// Lower the specified LLVM Constant to an MCExpr.
virtual const MCExpr *lowerConstant(const Constant *CV);
@@ -643,8 +643,8 @@ public:
void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
/// Return the alignment for the specified \p GV.
- static llvm::Align getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
- llvm::Align InAlign = llvm::Align::None());
+ static Align getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
+ Align InAlign = Align::None());
private:
/// Private state for PrintSpecial()
Modified: llvm/trunk/include/llvm/CodeGen/CallingConvLower.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/CallingConvLower.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/CallingConvLower.h (original)
+++ llvm/trunk/include/llvm/CodeGen/CallingConvLower.h Fri Sep 27 05:54:21 2019
@@ -424,18 +424,18 @@ public:
/// AllocateStack - Allocate a chunk of stack space with the specified size
/// and alignment.
unsigned AllocateStack(unsigned Size, unsigned Alignment) {
- const llvm::Align Align(Alignment);
- StackOffset = alignTo(StackOffset, Align);
+ const Align CheckedAlignment(Alignment);
+ StackOffset = alignTo(StackOffset, CheckedAlignment);
unsigned Result = StackOffset;
StackOffset += Size;
- MaxStackArgAlign = std::max(Align, MaxStackArgAlign);
- ensureMaxAlignment(Align);
+ MaxStackArgAlign = std::max(CheckedAlignment, MaxStackArgAlign);
+ ensureMaxAlignment(CheckedAlignment);
return Result;
}
- void ensureMaxAlignment(llvm::Align Align) {
+ void ensureMaxAlignment(Align Alignment) {
if (!AnalyzingMustTailForwardedRegs)
- MF.getFrameInfo().ensureMaxAlignment(Align.value());
+ MF.getFrameInfo().ensureMaxAlignment(Alignment.value());
}
/// Version of AllocateStack with extra register to be shadowed.
Modified: llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h (original)
+++ llvm/trunk/include/llvm/CodeGen/MachineBasicBlock.h Fri Sep 27 05:54:21 2019
@@ -105,7 +105,7 @@ private:
/// Alignment of the basic block. One if the basic block does not need to be
/// aligned.
- llvm::Align Alignment;
+ Align Alignment;
/// Indicate that this basic block is entered via an exception handler.
bool IsEHPad = false;
@@ -373,10 +373,10 @@ public:
const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;
/// Return alignment of the basic block.
- llvm::Align getAlignment() const { return Alignment; }
+ Align getAlignment() const { return Alignment; }
/// Set alignment of the basic block.
- void setAlignment(llvm::Align A) { Alignment = A; }
+ void setAlignment(Align A) { Alignment = A; }
/// Returns true if the block is a landing pad. That is this basic block is
/// entered via an exception handler.
Modified: llvm/trunk/include/llvm/CodeGen/MachineFrameInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/MachineFrameInfo.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineFrameInfo.h (original)
+++ llvm/trunk/include/llvm/CodeGen/MachineFrameInfo.h Fri Sep 27 05:54:21 2019
@@ -181,7 +181,7 @@ private:
uint8_t SSPLayout;
- StackObject(uint64_t Size, llvm::Align Alignment, int64_t SPOffset,
+ StackObject(uint64_t Size, Align Alignment, int64_t SPOffset,
bool IsImmutable, bool IsSpillSlot, const AllocaInst *Alloca,
bool IsAliased, uint8_t StackID = 0)
: SPOffset(SPOffset), Size(Size), Alignment(Alignment),
@@ -419,7 +419,9 @@ public:
/// Required alignment of the local object blob,
/// which is the strictest alignment of any object in it.
- void setLocalFrameMaxAlign(Align Align) { LocalFrameMaxAlign = Align; }
+ void setLocalFrameMaxAlign(Align Alignment) {
+ LocalFrameMaxAlign = Alignment;
+ }
/// Return the required alignment of the local object blob.
Align getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
@@ -564,7 +566,7 @@ public:
unsigned getMaxAlignment() const { return MaxAlignment.value(); }
/// Make sure the function is at least Align bytes aligned.
- void ensureMaxAlignment(llvm::Align Align);
+ void ensureMaxAlignment(Align Alignment);
/// FIXME: Remove this once transition to Align is over.
inline void ensureMaxAlignment(unsigned Align) {
ensureMaxAlignment(assumeAligned(Align));
@@ -732,9 +734,9 @@ public:
/// Create a new statically sized stack object, returning
/// a nonnegative identifier to represent it.
- int CreateStackObject(uint64_t Size, llvm::Align Alignment, bool isSpillSlot,
+ int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot,
const AllocaInst *Alloca = nullptr, uint8_t ID = 0);
- /// FIXME: Remove this function when transition to llvm::Align is over.
+ /// FIXME: Remove this function when transition to Align is over.
inline int CreateStackObject(uint64_t Size, unsigned Alignment,
bool isSpillSlot,
const AllocaInst *Alloca = nullptr,
@@ -745,8 +747,8 @@ public:
/// Create a new statically sized stack object that represents a spill slot,
/// returning a nonnegative identifier to represent it.
- int CreateSpillStackObject(uint64_t Size, llvm::Align Alignment);
- /// FIXME: Remove this function when transition to llvm::Align is over.
+ int CreateSpillStackObject(uint64_t Size, Align Alignment);
+ /// FIXME: Remove this function when transition to Align is over.
inline int CreateSpillStackObject(uint64_t Size, unsigned Alignment) {
return CreateSpillStackObject(Size, assumeAligned(Alignment));
}
@@ -760,9 +762,8 @@ public:
/// Notify the MachineFrameInfo object that a variable sized object has been
/// created. This must be created whenever a variable sized object is
/// created, whether or not the index returned is actually used.
- int CreateVariableSizedObject(llvm::Align Alignment,
- const AllocaInst *Alloca);
- /// FIXME: Remove this function when transition to llvm::Align is over.
+ int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca);
+ /// FIXME: Remove this function when transition to Align is over.
int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca) {
return CreateVariableSizedObject(assumeAligned(Alignment), Alloca);
}
Modified: llvm/trunk/include/llvm/CodeGen/MachineFunction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/MachineFunction.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineFunction.h (original)
+++ llvm/trunk/include/llvm/CodeGen/MachineFunction.h Fri Sep 27 05:54:21 2019
@@ -277,7 +277,7 @@ class MachineFunction {
unsigned FunctionNumber;
/// Alignment - The alignment of the function.
- llvm::Align Alignment;
+ Align Alignment;
/// ExposesReturnsTwice - True if the function calls setjmp or related
/// functions with attribute "returns twice", but doesn't have
@@ -509,13 +509,13 @@ public:
WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
/// getAlignment - Return the alignment of the function.
- llvm::Align getAlignment() const { return Alignment; }
+ Align getAlignment() const { return Alignment; }
/// setAlignment - Set the alignment of the function.
- void setAlignment(llvm::Align A) { Alignment = A; }
+ void setAlignment(Align A) { Alignment = A; }
/// ensureAlignment - Make sure the function is at least A bytes aligned.
- void ensureAlignment(llvm::Align A) {
+ void ensureAlignment(Align A) {
if (Alignment < A)
Alignment = A;
}
Modified: llvm/trunk/include/llvm/CodeGen/TargetCallingConv.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetCallingConv.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetCallingConv.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetCallingConv.h Fri Sep 27 05:54:21 2019
@@ -126,7 +126,7 @@ namespace ISD {
return A ? A->value() : 0;
}
void setByValAlign(unsigned A) {
- ByValAlign = encode(llvm::Align(A));
+ ByValAlign = encode(Align(A));
assert(getByValAlign() == A && "bitfield overflow");
}
@@ -135,7 +135,7 @@ namespace ISD {
return A ? A->value() : 0;
}
void setOrigAlign(unsigned A) {
- OrigAlign = encode(llvm::Align(A));
+ OrigAlign = encode(Align(A));
assert(getOrigAlign() == A && "bitfield overflow");
}
Modified: llvm/trunk/include/llvm/CodeGen/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetLowering.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetLowering.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetLowering.h Fri Sep 27 05:54:21 2019
@@ -1596,18 +1596,18 @@ public:
}
/// Return the minimum stack alignment of an argument.
- llvm::Align getMinStackArgumentAlignment() const {
+ Align getMinStackArgumentAlignment() const {
return MinStackArgumentAlignment;
}
/// Return the minimum function alignment.
- llvm::Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
+ Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
/// Return the preferred function alignment.
- llvm::Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
+ Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
/// Return the preferred loop alignment.
- virtual llvm::Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+ virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
return PrefLoopAlignment;
}
@@ -2120,24 +2120,24 @@ protected:
}
/// Set the target's minimum function alignment.
- void setMinFunctionAlignment(llvm::Align Align) {
- MinFunctionAlignment = Align;
+ void setMinFunctionAlignment(Align Alignment) {
+ MinFunctionAlignment = Alignment;
}
/// Set the target's preferred function alignment. This should be set if
/// there is a performance benefit to higher-than-minimum alignment
- void setPrefFunctionAlignment(llvm::Align Align) {
- PrefFunctionAlignment = Align;
+ void setPrefFunctionAlignment(Align Alignment) {
+ PrefFunctionAlignment = Alignment;
}
/// Set the target's preferred loop alignment. Default alignment is one, it
/// means the target does not care about loop alignment. The target may also
/// override getPrefLoopAlignment to provide per-loop values.
- void setPrefLoopAlignment(llvm::Align Align) { PrefLoopAlignment = Align; }
+ void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
/// Set the minimum stack alignment of an argument.
- void setMinStackArgumentAlignment(llvm::Align Align) {
- MinStackArgumentAlignment = Align;
+ void setMinStackArgumentAlignment(Align Alignment) {
+ MinStackArgumentAlignment = Alignment;
}
/// Set the maximum atomic operation size supported by the
@@ -2699,18 +2699,18 @@ private:
Sched::Preference SchedPreferenceInfo;
/// The minimum alignment that any argument on the stack needs to have.
- llvm::Align MinStackArgumentAlignment;
+ Align MinStackArgumentAlignment;
/// The minimum function alignment (used when optimizing for size, and to
/// prevent explicitly provided alignment from leading to incorrect code).
- llvm::Align MinFunctionAlignment;
+ Align MinFunctionAlignment;
/// The preferred function alignment (used when alignment unspecified and
/// optimizing for speed).
- llvm::Align PrefFunctionAlignment;
+ Align PrefFunctionAlignment;
/// The preferred loop alignment (in log2 bot in bytes).
- llvm::Align PrefLoopAlignment;
+ Align PrefLoopAlignment;
/// Size in bits of the maximum atomics size the backend supports.
/// Accesses larger than this will be expanded by AtomicExpandPass.
Modified: llvm/trunk/include/llvm/IR/DataLayout.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/DataLayout.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/DataLayout.h (original)
+++ llvm/trunk/include/llvm/IR/DataLayout.h Fri Sep 27 05:54:21 2019
@@ -72,11 +72,11 @@ struct LayoutAlignElem {
/// Alignment type from \c AlignTypeEnum
unsigned AlignType : 8;
unsigned TypeBitWidth : 24;
- llvm::Align ABIAlign;
- llvm::Align PrefAlign;
+ Align ABIAlign;
+ Align PrefAlign;
- static LayoutAlignElem get(AlignTypeEnum align_type, llvm::Align abi_align,
- llvm::Align pref_align, uint32_t bit_width);
+ static LayoutAlignElem get(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width);
bool operator==(const LayoutAlignElem &rhs) const;
};
@@ -88,15 +88,15 @@ struct LayoutAlignElem {
/// \note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct PointerAlignElem {
- llvm::Align ABIAlign;
- llvm::Align PrefAlign;
+ Align ABIAlign;
+ Align PrefAlign;
uint32_t TypeByteWidth;
uint32_t AddressSpace;
uint32_t IndexWidth;
/// Initializer
- static PointerAlignElem get(uint32_t AddressSpace, llvm::Align ABIAlign,
- llvm::Align PrefAlign, uint32_t TypeByteWidth,
+ static PointerAlignElem get(uint32_t AddressSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeByteWidth,
uint32_t IndexWidth);
bool operator==(const PointerAlignElem &rhs) const;
@@ -173,16 +173,15 @@ private:
/// well-defined bitwise representation.
SmallVector<unsigned, 8> NonIntegralAddressSpaces;
- void setAlignment(AlignTypeEnum align_type, llvm::Align abi_align,
- llvm::Align pref_align, uint32_t bit_width);
- llvm::Align getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
- bool ABIAlign, Type *Ty) const;
- void setPointerAlignment(uint32_t AddrSpace, llvm::Align ABIAlign,
- llvm::Align PrefAlign, uint32_t TypeByteWidth,
- uint32_t IndexWidth);
+ void setAlignment(AlignTypeEnum align_type, Align abi_align, Align pref_align,
+ uint32_t bit_width);
+ Align getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
+ bool ABIAlign, Type *Ty) const;
+ void setPointerAlignment(uint32_t AddrSpace, Align ABIAlign, Align PrefAlign,
+ uint32_t TypeByteWidth, uint32_t IndexWidth);
/// Internal helper method that returns requested alignment for type.
- llvm::Align getAlignment(Type *Ty, bool abi_or_pref) const;
+ Align getAlignment(Type *Ty, bool abi_or_pref) const;
/// Parses a target data specification string. Assert if the string is
/// malformed.
@@ -262,11 +261,11 @@ public:
bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }
/// Returns true if the given alignment exceeds the natural stack alignment.
- bool exceedsNaturalStackAlignment(llvm::Align Align) const {
- return StackNaturalAlign && (Align > StackNaturalAlign);
+ bool exceedsNaturalStackAlignment(Align Alignment) const {
+ return StackNaturalAlign && (Alignment > StackNaturalAlign);
}
- llvm::Align getStackAlignment() const {
+ Align getStackAlignment() const {
assert(StackNaturalAlign && "StackNaturalAlign must be defined");
return *StackNaturalAlign;
}
@@ -349,12 +348,12 @@ public:
}
/// Layout pointer alignment
- llvm::Align getPointerABIAlignment(unsigned AS) const;
+ Align getPointerABIAlignment(unsigned AS) const;
/// Return target's alignment for stack-based pointers
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
- llvm::Align getPointerPrefAlignment(unsigned AS = 0) const;
+ Align getPointerPrefAlignment(unsigned AS = 0) const;
/// Layout pointer size
/// FIXME: The defaults need to be removed once all of
@@ -490,7 +489,7 @@ public:
/// Returns the minimum ABI-required alignment for an integer type of
/// the specified bitwidth.
- llvm::Align getABIIntegerTypeAlignment(unsigned BitWidth) const;
+ Align getABIIntegerTypeAlignment(unsigned BitWidth) const;
/// Returns the preferred stack/global alignment for the specified
/// type.
@@ -562,7 +561,7 @@ inline LLVMTargetDataRef wrap(const Data
/// based on the DataLayout structure.
class StructLayout {
uint64_t StructSize;
- llvm::Align StructAlignment;
+ Align StructAlignment;
unsigned IsPadded : 1;
unsigned NumElements : 31;
uint64_t MemberOffsets[1]; // variable sized array!
@@ -572,7 +571,7 @@ public:
uint64_t getSizeInBits() const { return 8 * StructSize; }
- llvm::Align getAlignment() const { return StructAlignment; }
+ Align getAlignment() const { return StructAlignment; }
/// Returns whether the struct has padding or not between its fields.
/// NB: Padding in nested element is not taken into account.
Modified: llvm/trunk/include/llvm/IR/Instructions.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/Instructions.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/Instructions.h (original)
+++ llvm/trunk/include/llvm/IR/Instructions.h Fri Sep 27 05:54:21 2019
@@ -114,9 +114,9 @@ public:
return MA->value();
return 0;
}
- // FIXME: Remove once migration to llvm::Align is over.
+ // FIXME: Remove once migration to Align is over.
void setAlignment(unsigned Align);
- void setAlignment(llvm::MaybeAlign Align);
+ void setAlignment(MaybeAlign Align);
/// Return true if this alloca is in the entry block of the function and is a
/// constant size. If so, the code generator will fold it into the
@@ -248,9 +248,9 @@ public:
return 0;
}
- // FIXME: Remove once migration to llvm::Align is over.
+ // FIXME: Remove once migration to Align is over.
void setAlignment(unsigned Align);
- void setAlignment(llvm::MaybeAlign Align);
+ void setAlignment(MaybeAlign Align);
/// Returns the ordering constraint of this load instruction.
AtomicOrdering getOrdering() const {
@@ -378,9 +378,9 @@ public:
return 0;
}
- // FIXME: Remove once migration to llvm::Align is over.
+ // FIXME: Remove once migration to Align is over.
void setAlignment(unsigned Align);
- void setAlignment(llvm::MaybeAlign Align);
+ void setAlignment(MaybeAlign Align);
/// Returns the ordering constraint of this store instruction.
AtomicOrdering getOrdering() const {
Modified: llvm/trunk/include/llvm/MC/MCSection.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/MC/MCSection.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/MC/MCSection.h (original)
+++ llvm/trunk/include/llvm/MC/MCSection.h Fri Sep 27 05:54:21 2019
@@ -59,7 +59,7 @@ private:
MCSymbol *Begin;
MCSymbol *End = nullptr;
/// The alignment requirement of this section.
- llvm::Align Alignment;
+ Align Alignment;
/// The section index in the assemblers section list.
unsigned Ordinal = 0;
/// The index of this section in the layout order.
@@ -119,7 +119,7 @@ public:
bool hasEnded() const;
unsigned getAlignment() const { return Alignment.value(); }
- void setAlignment(llvm::Align Value) { Alignment = Value; }
+ void setAlignment(Align Value) { Alignment = Value; }
unsigned getOrdinal() const { return Ordinal; }
void setOrdinal(unsigned Value) { Ordinal = Value; }
Modified: llvm/trunk/include/llvm/Support/Alignment.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Support/Alignment.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Support/Alignment.h (original)
+++ llvm/trunk/include/llvm/Support/Alignment.h Fri Sep 27 05:54:21 2019
@@ -76,10 +76,10 @@ public:
/// Returns a default constructed Align which corresponds to no alignment.
/// This is useful to test for unalignment as it conveys clear semantic.
- /// `if (A != llvm::Align::None())`
+ /// `if (A != Align::None())`
/// would be better than
- /// `if (A > llvm::Align(1))`
- constexpr static const Align None() { return llvm::Align(); }
+ /// `if (A > Align(1))`
+ constexpr static const Align None() { return Align(); }
};
/// Treats the value 0 as a 1, so Align is always at least 1.
@@ -142,8 +142,8 @@ inline uint64_t alignTo(uint64_t Size, M
/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align.
-inline uint64_t offsetToAlignment(uint64_t Value, llvm::Align Align) {
- return alignTo(Value, Align) - Value;
+inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
+ return alignTo(Value, Alignment) - Value;
}
/// Returns the log2 of the alignment.
Modified: llvm/trunk/include/llvm/Support/OnDiskHashTable.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Support/OnDiskHashTable.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Support/OnDiskHashTable.h (original)
+++ llvm/trunk/include/llvm/Support/OnDiskHashTable.h Fri Sep 27 05:54:21 2019
@@ -208,8 +208,7 @@ public:
// Pad with zeros so that we can start the hashtable at an aligned address.
offset_type TableOff = Out.tell();
- uint64_t N =
- llvm::offsetToAlignment(TableOff, llvm::Align(alignof(offset_type)));
+ uint64_t N = offsetToAlignment(TableOff, Align(alignof(offset_type)));
TableOff += N;
while (N--)
LE.write<uint8_t>(0);
Modified: llvm/trunk/lib/Analysis/MemoryBuiltins.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/MemoryBuiltins.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/MemoryBuiltins.cpp (original)
+++ llvm/trunk/lib/Analysis/MemoryBuiltins.cpp Fri Sep 27 05:54:21 2019
@@ -560,9 +560,9 @@ STATISTIC(ObjectVisitorArgument,
STATISTIC(ObjectVisitorLoad,
"Number of load instructions with unsolved size and offset");
-APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
- if (Options.RoundToAlign && Align)
- return APInt(IntTyBits, alignTo(Size.getZExtValue(), llvm::Align(Align)));
+APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Alignment) {
+ if (Options.RoundToAlign && Alignment)
+ return APInt(IntTyBits, alignTo(Size.getZExtValue(), Align(Alignment)));
return Size;
}
Modified: llvm/trunk/lib/Analysis/TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/TargetTransformInfo.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Analysis/TargetTransformInfo.cpp Fri Sep 27 05:54:21 2019
@@ -302,12 +302,11 @@ bool TargetTransformInfo::isLegalMaskedL
}
bool TargetTransformInfo::isLegalNTStore(Type *DataType,
- llvm::Align Alignment) const {
+ Align Alignment) const {
return TTIImpl->isLegalNTStore(DataType, Alignment);
}
-bool TargetTransformInfo::isLegalNTLoad(Type *DataType,
- llvm::Align Alignment) const {
+bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
return TTIImpl->isLegalNTLoad(DataType, Alignment);
}
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/AsmPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -163,29 +163,28 @@ static gcp_map_type &getGCMap(void *&P)
/// getGVAlignment - Return the alignment to use for the specified global
/// value. This rounds up to the preferred alignment if possible and legal.
-llvm::Align AsmPrinter::getGVAlignment(const GlobalValue *GV,
- const DataLayout &DL,
- llvm::Align InAlign) {
- llvm::Align Align;
+Align AsmPrinter::getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
+ Align InAlign) {
+ Align Alignment;
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
- Align = llvm::Align(DL.getPreferredAlignment(GVar));
+ Alignment = Align(DL.getPreferredAlignment(GVar));
// If InAlign is specified, round it to it.
- if (InAlign > Align)
- Align = InAlign;
+ if (InAlign > Alignment)
+ Alignment = InAlign;
// If the GV has a specified alignment, take it into account.
- const llvm::MaybeAlign GVAlign(GV->getAlignment());
+ const MaybeAlign GVAlign(GV->getAlignment());
if (!GVAlign)
- return Align;
+ return Alignment;
assert(GVAlign && "GVAlign must be set");
// If the GVAlign is larger than NumBits, or if we are required to obey
// NumBits because the GV has an assigned section, obey it.
- if (*GVAlign > Align || GV->hasSection())
- Align = *GVAlign;
- return Align;
+ if (*GVAlign > Alignment || GV->hasSection())
+ Alignment = *GVAlign;
+ return Alignment;
}
AsmPrinter::AsmPrinter(TargetMachine &tm, std::unique_ptr<MCStreamer> Streamer)
@@ -507,7 +506,7 @@ void AsmPrinter::EmitGlobalVariable(cons
// If the alignment is specified, we *must* obey it. Overaligning a global
// with a specified alignment is a prompt way to break globals emitted to
// sections and expected to be contiguous (e.g. ObjC metadata).
- const llvm::Align Align = getGVAlignment(GV, DL);
+ const Align Alignment = getGVAlignment(GV, DL);
for (const HandlerInfo &HI : Handlers) {
NamedRegionTimer T(HI.TimerName, HI.TimerDescription,
@@ -523,7 +522,7 @@ void AsmPrinter::EmitGlobalVariable(cons
const bool SupportsAlignment =
getObjFileLowering().getCommDirectiveSupportsAlignment();
OutStreamer->EmitCommonSymbol(GVSym, Size,
- SupportsAlignment ? Align.value() : 0);
+ SupportsAlignment ? Alignment.value() : 0);
return;
}
@@ -538,7 +537,7 @@ void AsmPrinter::EmitGlobalVariable(cons
Size = 1; // zerofill of 0 bytes is undefined.
EmitLinkage(GV, GVSym);
// .zerofill __DATA, __bss, _foo, 400, 5
- OutStreamer->EmitZerofill(TheSection, GVSym, Size, Align.value());
+ OutStreamer->EmitZerofill(TheSection, GVSym, Size, Alignment.value());
return;
}
@@ -557,7 +556,7 @@ void AsmPrinter::EmitGlobalVariable(cons
// Prefer to simply fall back to .local / .comm in this case.
if (MAI->getLCOMMDirectiveAlignmentType() != LCOMM::NoAlignment) {
// .lcomm _foo, 42
- OutStreamer->EmitLocalCommonSymbol(GVSym, Size, Align.value());
+ OutStreamer->EmitLocalCommonSymbol(GVSym, Size, Alignment.value());
return;
}
@@ -567,7 +566,7 @@ void AsmPrinter::EmitGlobalVariable(cons
const bool SupportsAlignment =
getObjFileLowering().getCommDirectiveSupportsAlignment();
OutStreamer->EmitCommonSymbol(GVSym, Size,
- SupportsAlignment ? Align.value() : 0);
+ SupportsAlignment ? Alignment.value() : 0);
return;
}
@@ -588,11 +587,11 @@ void AsmPrinter::EmitGlobalVariable(cons
if (GVKind.isThreadBSS()) {
TheSection = getObjFileLowering().getTLSBSSSection();
- OutStreamer->EmitTBSSSymbol(TheSection, MangSym, Size, Align.value());
+ OutStreamer->EmitTBSSSymbol(TheSection, MangSym, Size, Alignment.value());
} else if (GVKind.isThreadData()) {
OutStreamer->SwitchSection(TheSection);
- EmitAlignment(Align, GV);
+ EmitAlignment(Alignment, GV);
OutStreamer->EmitLabel(MangSym);
EmitGlobalConstant(GV->getParent()->getDataLayout(),
@@ -628,7 +627,7 @@ void AsmPrinter::EmitGlobalVariable(cons
OutStreamer->SwitchSection(TheSection);
EmitLinkage(GV, EmittedInitSym);
- EmitAlignment(Align, GV);
+ EmitAlignment(Alignment, GV);
OutStreamer->EmitLabel(EmittedInitSym);
@@ -1435,7 +1434,7 @@ bool AsmPrinter::doFinalization(Module &
OutStreamer->SwitchSection(TLOF.getDataSection());
const DataLayout &DL = M.getDataLayout();
- EmitAlignment(llvm::Align(DL.getPointerSize()));
+ EmitAlignment(Align(DL.getPointerSize()));
for (const auto &Stub : Stubs) {
OutStreamer->EmitLabel(Stub.first);
OutStreamer->EmitSymbolValue(Stub.second.getPointer(),
@@ -1462,7 +1461,7 @@ bool AsmPrinter::doFinalization(Module &
COFF::IMAGE_SCN_LNK_COMDAT,
SectionKind::getReadOnly(), Stub.first->getName(),
COFF::IMAGE_COMDAT_SELECT_ANY));
- EmitAlignment(llvm::Align(DL.getPointerSize()));
+ EmitAlignment(Align(DL.getPointerSize()));
OutStreamer->EmitSymbolAttribute(Stub.first, MCSA_Global);
OutStreamer->EmitLabel(Stub.first);
OutStreamer->EmitSymbolValue(Stub.second.getPointer(),
@@ -1763,7 +1762,7 @@ void AsmPrinter::EmitConstantPool() {
if (CurSection != CPSections[i].S) {
OutStreamer->SwitchSection(CPSections[i].S);
- EmitAlignment(llvm::Align(CPSections[i].Alignment));
+ EmitAlignment(Align(CPSections[i].Alignment));
CurSection = CPSections[i].S;
Offset = 0;
}
@@ -1810,7 +1809,7 @@ void AsmPrinter::EmitJumpTableInfo() {
OutStreamer->SwitchSection(ReadOnlySection);
}
- EmitAlignment(llvm::Align(MJTI->getEntryAlignment(DL)));
+ EmitAlignment(Align(MJTI->getEntryAlignment(DL)));
// Jump tables in code sections are marked with a data_region directive
// where that's supported.
@@ -2026,7 +2025,7 @@ void AsmPrinter::EmitXXStructorList(cons
llvm::stable_sort(Structors, [](const Structor &L, const Structor &R) {
return L.Priority < R.Priority;
});
- const llvm::Align Align = DL.getPointerPrefAlignment();
+ const Align Align = DL.getPointerPrefAlignment();
for (Structor &S : Structors) {
const TargetLoweringObjectFile &Obj = getObjFileLowering();
const MCSymbol *KeySym = nullptr;
@@ -2150,18 +2149,17 @@ void AsmPrinter::EmitLabelPlusOffset(con
// two boundary. If a global value is specified, and if that global has
// an explicit alignment requested, it will override the alignment request
// if required for correctness.
-void AsmPrinter::EmitAlignment(llvm::Align Align,
- const GlobalObject *GV) const {
+void AsmPrinter::EmitAlignment(Align Alignment, const GlobalObject *GV) const {
if (GV)
- Align = getGVAlignment(GV, GV->getParent()->getDataLayout(), Align);
+ Alignment = getGVAlignment(GV, GV->getParent()->getDataLayout(), Alignment);
- if (Align == 1)
+ if (Alignment == Align::None())
return; // 1-byte aligned: no need to emit alignment.
if (getCurrentSection()->getKind().isText())
- OutStreamer->EmitCodeAlignment(Align.value());
+ OutStreamer->EmitCodeAlignment(Alignment.value());
else
- OutStreamer->EmitValueToAlignment(Align.value());
+ OutStreamer->EmitValueToAlignment(Alignment.value());
}
//===----------------------------------------------------------------------===//
@@ -2936,9 +2934,9 @@ void AsmPrinter::EmitBasicBlockStart(con
}
// Emit an alignment directive for this block, if needed.
- const llvm::Align Align = MBB.getAlignment();
- if (Align != llvm::Align::None())
- EmitAlignment(Align);
+ const Align Alignment = MBB.getAlignment();
+ if (Alignment != Align::None())
+ EmitAlignment(Alignment);
MCCodePaddingContext Context;
setupCodePaddingContext(MBB, Context);
OutStreamer->EmitCodePaddingBasicBlockStart(Context);
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Fri Sep 27 05:54:21 2019
@@ -2509,8 +2509,8 @@ void DwarfDebug::emitDebugARanges() {
unsigned TupleSize = PtrSize * 2;
// 7.20 in the Dwarf specs requires the table to be aligned to a tuple.
- unsigned Padding = offsetToAlignment(sizeof(int32_t) + ContentSize,
- llvm::Align(TupleSize));
+ unsigned Padding =
+ offsetToAlignment(sizeof(int32_t) + ContentSize, Align(TupleSize));
ContentSize += Padding;
ContentSize += (List.size() + 1) * TupleSize;
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/EHStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/EHStreamer.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/EHStreamer.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/EHStreamer.cpp Fri Sep 27 05:54:21 2019
@@ -426,7 +426,7 @@ MCSymbol *EHStreamer::emitExceptionTable
// EHABI). In this case LSDASection will be NULL.
if (LSDASection)
Asm->OutStreamer->SwitchSection(LSDASection);
- Asm->EmitAlignment(llvm::Align(4));
+ Asm->EmitAlignment(Align(4));
// Emit the LSDA.
MCSymbol *GCCETSym =
@@ -602,11 +602,11 @@ MCSymbol *EHStreamer::emitExceptionTable
}
if (HaveTTData) {
- Asm->EmitAlignment(llvm::Align(4));
+ Asm->EmitAlignment(Align(4));
emitTypeInfos(TTypeEncoding, TTBaseLabel);
}
- Asm->EmitAlignment(llvm::Align(4));
+ Asm->EmitAlignment(Align(4));
return GCCETSym;
}
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -72,7 +72,7 @@ void ErlangGCPrinter::finishAssembly(Mod
**/
// Align to address width.
- AP.EmitAlignment(IntPtrSize == 4 ? llvm::Align(4) : llvm::Align(8));
+ AP.EmitAlignment(IntPtrSize == 4 ? Align(4) : Align(8));
// Emit PointCount.
OS.AddComment("safe point count");
Modified: llvm/trunk/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp (original)
+++ llvm/trunk/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -129,7 +129,7 @@ void OcamlGCMetadataPrinter::finishAssem
report_fatal_error(" Too much descriptor for ocaml GC");
}
AP.emitInt16(NumDescriptors);
- AP.EmitAlignment(IntPtrSize == 4 ? llvm::Align(4) : llvm::Align(8));
+ AP.EmitAlignment(IntPtrSize == 4 ? Align(4) : Align(8));
for (GCModuleInfo::FuncInfoVec::iterator I = Info.funcinfo_begin(),
IE = Info.funcinfo_end();
@@ -180,7 +180,7 @@ void OcamlGCMetadataPrinter::finishAssem
AP.emitInt16(K->StackOffset);
}
- AP.EmitAlignment(IntPtrSize == 4 ? llvm::Align(4) : llvm::Align(8));
+ AP.EmitAlignment(IntPtrSize == 4 ? Align(4) : Align(8));
}
}
}
Modified: llvm/trunk/lib/CodeGen/BranchRelaxation.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/BranchRelaxation.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/BranchRelaxation.cpp (original)
+++ llvm/trunk/lib/CodeGen/BranchRelaxation.cpp Fri Sep 27 05:54:21 2019
@@ -65,17 +65,17 @@ class BranchRelaxation : public MachineF
/// block.
unsigned postOffset(const MachineBasicBlock &MBB) const {
const unsigned PO = Offset + Size;
- const llvm::Align Align = MBB.getAlignment();
- if (Align == 1)
+ const Align Alignment = MBB.getAlignment();
+ if (Alignment == 1)
return PO;
- const llvm::Align ParentAlign = MBB.getParent()->getAlignment();
- if (Align <= ParentAlign)
- return PO + offsetToAlignment(PO, Align);
+ const Align ParentAlign = MBB.getParent()->getAlignment();
+ if (Alignment <= ParentAlign)
+ return PO + offsetToAlignment(PO, Alignment);
// The alignment of this MBB is larger than the function's alignment, so we
// can't tell whether or not it will insert nops. Assume that it will.
- return PO + Align.value() + offsetToAlignment(PO, Align);
+ return PO + Alignment.value() + offsetToAlignment(PO, Alignment);
}
};
Modified: llvm/trunk/lib/CodeGen/CallingConvLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/CallingConvLower.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/CallingConvLower.cpp (original)
+++ llvm/trunk/lib/CodeGen/CallingConvLower.cpp Fri Sep 27 05:54:21 2019
@@ -43,17 +43,18 @@ CCState::CCState(CallingConv::ID CC, boo
void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, int MinSize,
int MinAlignment, ISD::ArgFlagsTy ArgFlags) {
- llvm::Align MinAlign(MinAlignment);
- llvm::Align Align(ArgFlags.getByValAlign());
+ Align MinAlign(MinAlignment);
+ Align Alignment(ArgFlags.getByValAlign());
unsigned Size = ArgFlags.getByValSize();
if (MinSize > (int)Size)
Size = MinSize;
- if (MinAlign > Align)
- Align = MinAlign;
- ensureMaxAlignment(Align);
- MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align.value());
+ if (MinAlign > Alignment)
+ Alignment = MinAlign;
+ ensureMaxAlignment(Alignment);
+ MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size,
+ Alignment.value());
Size = unsigned(alignTo(Size, MinAlign));
- unsigned Offset = AllocateStack(Size, Align.value());
+ unsigned Offset = AllocateStack(Size, Alignment.value());
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
@@ -198,7 +199,7 @@ static bool isValueTypeInRegForCC(Callin
void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
MVT VT, CCAssignFn Fn) {
unsigned SavedStackOffset = StackOffset;
- llvm::Align SavedMaxStackArgAlign = MaxStackArgAlign;
+ Align SavedMaxStackArgAlign = MaxStackArgAlign;
unsigned NumLocs = Locs.size();
// Set the 'inreg' flag if it is used for this calling convention.
Modified: llvm/trunk/lib/CodeGen/GlobalISel/CombinerHelper.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/CombinerHelper.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/CombinerHelper.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/CombinerHelper.cpp Fri Sep 27 05:54:21 2019
@@ -866,7 +866,7 @@ bool CombinerHelper::optimizeMemcpy(Mach
bool DstAlignCanChange = false;
MachineFrameInfo &MFI = MF.getFrameInfo();
bool OptSize = shouldLowerMemFuncForSize(MF);
- unsigned Align = MinAlign(DstAlign, SrcAlign);
+ unsigned Alignment = MinAlign(DstAlign, SrcAlign);
MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -885,7 +885,8 @@ bool CombinerHelper::optimizeMemcpy(Mach
MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
if (!findGISelOptimalMemOpLowering(
- MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), SrcAlign,
+ MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
+ SrcAlign,
/*IsMemset=*/false,
/*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
/*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(),
@@ -901,16 +902,16 @@ bool CombinerHelper::optimizeMemcpy(Mach
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
- while (NewAlign > Align &&
- DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
- NewAlign /= 2;
+ while (NewAlign > Alignment &&
+ DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+ NewAlign /= 2;
- if (NewAlign > Align) {
+ if (NewAlign > Alignment) {
unsigned FI = FIDef->getOperand(1).getIndex();
// Give the stack frame object a larger alignment if needed.
if (MFI.getObjectAlignment(FI) < NewAlign)
MFI.setObjectAlignment(FI, NewAlign);
- Align = NewAlign;
+ Alignment = NewAlign;
}
}
@@ -973,7 +974,7 @@ bool CombinerHelper::optimizeMemmove(Mac
bool DstAlignCanChange = false;
MachineFrameInfo &MFI = MF.getFrameInfo();
bool OptSize = shouldLowerMemFuncForSize(MF);
- unsigned Align = MinAlign(DstAlign, SrcAlign);
+ unsigned Alignment = MinAlign(DstAlign, SrcAlign);
MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -991,7 +992,8 @@ bool CombinerHelper::optimizeMemmove(Mac
// to a bug in it's findOptimalMemOpLowering implementation. For now do the
// same thing here.
if (!findGISelOptimalMemOpLowering(
- MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), SrcAlign,
+ MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
+ SrcAlign,
/*IsMemset=*/false,
/*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
/*AllowOverlap=*/false, DstPtrInfo.getAddrSpace(),
@@ -1007,16 +1009,16 @@ bool CombinerHelper::optimizeMemmove(Mac
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
- while (NewAlign > Align &&
- DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
- NewAlign /= 2;
+ while (NewAlign > Alignment &&
+ DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+ NewAlign /= 2;
- if (NewAlign > Align) {
+ if (NewAlign > Alignment) {
unsigned FI = FIDef->getOperand(1).getIndex();
// Give the stack frame object a larger alignment if needed.
if (MFI.getObjectAlignment(FI) < NewAlign)
MFI.setObjectAlignment(FI, NewAlign);
- Align = NewAlign;
+ Alignment = NewAlign;
}
}
Modified: llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp (original)
+++ llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp Fri Sep 27 05:54:21 2019
@@ -641,7 +641,7 @@ bool MIParser::parseBasicBlockDefinition
return error(Loc, Twine("redefinition of machine basic block with id #") +
Twine(ID));
if (Alignment)
- MBB->setAlignment(llvm::Align(Alignment));
+ MBB->setAlignment(Align(Alignment));
if (HasAddressTaken)
MBB->setHasAddressTaken();
MBB->setIsEHPad(IsLandingPad);
Modified: llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp (original)
+++ llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp Fri Sep 27 05:54:21 2019
@@ -393,7 +393,7 @@ MIRParserImpl::initializeMachineFunction
}
if (YamlMF.Alignment)
- MF.setAlignment(llvm::Align(YamlMF.Alignment));
+ MF.setAlignment(Align(YamlMF.Alignment));
MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
MF.setHasWinCFI(YamlMF.HasWinCFI);
Modified: llvm/trunk/lib/CodeGen/MIRPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MIRPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MIRPrinter.cpp (original)
+++ llvm/trunk/lib/CodeGen/MIRPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -629,7 +629,7 @@ void MIPrinter::print(const MachineBasic
OS << "landing-pad";
HasAttributes = true;
}
- if (MBB.getAlignment() != llvm::Align::None()) {
+ if (MBB.getAlignment() != Align::None()) {
OS << (HasAttributes ? ", " : " (");
OS << "align " << MBB.getAlignment().value();
HasAttributes = true;
Modified: llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineBasicBlock.cpp Fri Sep 27 05:54:21 2019
@@ -326,7 +326,7 @@ void MachineBasicBlock::print(raw_ostrea
OS << "landing-pad";
HasAttributes = true;
}
- if (getAlignment() != llvm::Align::None()) {
+ if (getAlignment() != Align::None()) {
OS << (HasAttributes ? ", " : " (");
OS << "align " << Log2(getAlignment());
HasAttributes = true;
Modified: llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineBlockPlacement.cpp Fri Sep 27 05:54:21 2019
@@ -2807,7 +2807,7 @@ void MachineBlockPlacement::alignBlocks(
if (!L)
continue;
- const llvm::Align Align = TLI->getPrefLoopAlignment(L);
+ const Align Align = TLI->getPrefLoopAlignment(L);
if (Align == 1)
continue; // Don't care about loop alignment.
@@ -3109,14 +3109,14 @@ bool MachineBlockPlacement::runOnMachine
if (AlignAllBlock)
// Align all of the blocks in the function to a specific alignment.
for (MachineBasicBlock &MBB : MF)
- MBB.setAlignment(llvm::Align(1ULL << AlignAllBlock));
+ MBB.setAlignment(Align(1ULL << AlignAllBlock));
else if (AlignAllNonFallThruBlocks) {
// Align all of the blocks that have no fall-through predecessors to a
// specific alignment.
for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
auto LayoutPred = std::prev(MBI);
if (!LayoutPred->isSuccessor(&*MBI))
- MBI->setAlignment(llvm::Align(1ULL << AlignAllNonFallThruBlocks));
+ MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks));
}
}
if (ViewBlockLayoutWithBFI != GVDT_None &&
Modified: llvm/trunk/lib/CodeGen/MachineFrameInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineFrameInfo.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineFrameInfo.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineFrameInfo.cpp Fri Sep 27 05:54:21 2019
@@ -28,26 +28,26 @@
using namespace llvm;
-void MachineFrameInfo::ensureMaxAlignment(llvm::Align Align) {
+void MachineFrameInfo::ensureMaxAlignment(Align Alignment) {
if (!StackRealignable)
- assert(Align <= StackAlignment &&
- "For targets without stack realignment, Align is out of limit!");
- if (MaxAlignment < Align) MaxAlignment = Align;
+ assert(Alignment <= StackAlignment &&
+ "For targets without stack realignment, Alignment is out of limit!");
+ if (MaxAlignment < Alignment)
+ MaxAlignment = Alignment;
}
/// Clamp the alignment if requested and emit a warning.
-static inline llvm::Align clampStackAlignment(bool ShouldClamp,
- llvm::Align Align,
- llvm::Align StackAlign) {
- if (!ShouldClamp || Align <= StackAlign)
- return Align;
- LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Align.value()
- << " exceeds the stack alignment " << StackAlign.value()
+static inline Align clampStackAlignment(bool ShouldClamp, Align Alignment,
+ Align StackAlignment) {
+ if (!ShouldClamp || Alignment <= StackAlignment)
+ return Alignment;
+ LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Alignment.value()
+ << " exceeds the stack alignment " << StackAlignment.value()
<< " when stack realignment is off" << '\n');
- return StackAlign;
+ return StackAlignment;
}
-int MachineFrameInfo::CreateStackObject(uint64_t Size, llvm::Align Alignment,
+int MachineFrameInfo::CreateStackObject(uint64_t Size, Align Alignment,
bool IsSpillSlot,
const AllocaInst *Alloca,
uint8_t StackID) {
@@ -62,8 +62,7 @@ int MachineFrameInfo::CreateStackObject(
return Index;
}
-int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
- llvm::Align Alignment) {
+int MachineFrameInfo::CreateSpillStackObject(uint64_t Size, Align Alignment) {
Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
CreateStackObject(Size, Alignment, true);
int Index = (int)Objects.size() - NumFixedObjects - 1;
@@ -71,7 +70,7 @@ int MachineFrameInfo::CreateSpillStackOb
return Index;
}
-int MachineFrameInfo::CreateVariableSizedObject(llvm::Align Alignment,
+int MachineFrameInfo::CreateVariableSizedObject(Align Alignment,
const AllocaInst *Alloca) {
HasVarSizedObjects = true;
Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
@@ -89,8 +88,8 @@ int MachineFrameInfo::CreateFixedObject(
// object is 16-byte aligned. Note that unlike the non-fixed case, if the
// stack needs realignment, we can't assume that the stack will in fact be
// aligned.
- llvm::Align Alignment = commonAlignment(
- ForcedRealign ? llvm::Align::None() : StackAlignment, SPOffset);
+ Align Alignment =
+ commonAlignment(ForcedRealign ? Align::None() : StackAlignment, SPOffset);
Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
Objects.insert(Objects.begin(),
StackObject(Size, Alignment, SPOffset, IsImmutable,
@@ -102,8 +101,8 @@ int MachineFrameInfo::CreateFixedObject(
int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
int64_t SPOffset,
bool IsImmutable) {
- llvm::Align Alignment = commonAlignment(
- ForcedRealign ? llvm::Align::None() : StackAlignment, SPOffset);
+ Align Alignment =
+ commonAlignment(ForcedRealign ? Align::None() : StackAlignment, SPOffset);
Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
Objects.insert(Objects.begin(),
StackObject(Size, Alignment, SPOffset, IsImmutable,
Modified: llvm/trunk/lib/CodeGen/MachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineFunction.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineFunction.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineFunction.cpp Fri Sep 27 05:54:21 2019
@@ -182,7 +182,7 @@ void MachineFunction::init() {
STI->getTargetLowering()->getPrefFunctionAlignment());
if (AlignAllFunctions)
- Alignment = llvm::Align(1ULL << AlignAllFunctions);
+ Alignment = Align(1ULL << AlignAllFunctions);
JumpTableInfo = nullptr;
Modified: llvm/trunk/lib/CodeGen/PatchableFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PatchableFunction.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PatchableFunction.cpp (original)
+++ llvm/trunk/lib/CodeGen/PatchableFunction.cpp Fri Sep 27 05:54:21 2019
@@ -78,7 +78,7 @@ bool PatchableFunction::runOnMachineFunc
MIB.add(MO);
FirstActualI->eraseFromParent();
- MF.ensureAlignment(llvm::Align(16));
+ MF.ensureAlignment(Align(16));
return true;
}
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Fri Sep 27 05:54:21 2019
@@ -1898,7 +1898,7 @@ SDValue SelectionDAG::expandVAArg(SDNode
EVT VT = Node->getValueType(0);
SDValue Tmp1 = Node->getOperand(0);
SDValue Tmp2 = Node->getOperand(1);
- const llvm::MaybeAlign MA(Node->getConstantOperandVal(3));
+ const MaybeAlign MA(Node->getConstantOperandVal(3));
SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
Tmp2, MachinePointerInfo(V));
@@ -5757,7 +5757,7 @@ static void chainLoadsAndStoresForMemcpy
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
SDValue Chain, SDValue Dst, SDValue Src,
- uint64_t Size, unsigned Align,
+ uint64_t Size, unsigned Alignment,
bool isVol, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) {
@@ -5782,15 +5782,15 @@ static SDValue getMemcpyLoadsAndStores(S
if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
DstAlignCanChange = true;
unsigned SrcAlign = DAG.InferPtrAlignment(Src);
- if (Align > SrcAlign)
- SrcAlign = Align;
+ if (Alignment > SrcAlign)
+ SrcAlign = Alignment;
ConstantDataArraySlice Slice;
bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
if (!TLI.findOptimalMemOpLowering(
- MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align),
+ MemOps, Limit, Size, (DstAlignCanChange ? 0 : Alignment),
(isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false,
/*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant,
/*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(),
@@ -5805,15 +5805,15 @@ static SDValue getMemcpyLoadsAndStores(S
// realignment.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!TRI->needsStackRealignment(MF))
- while (NewAlign > Align &&
- DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
- NewAlign /= 2;
+ while (NewAlign > Alignment &&
+ DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+ NewAlign /= 2;
- if (NewAlign > Align) {
+ if (NewAlign > Alignment) {
// Give the stack frame object a larger alignment if needed.
if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
MFI.setObjectAlignment(FI->getIndex(), NewAlign);
- Align = NewAlign;
+ Alignment = NewAlign;
}
}
@@ -5856,10 +5856,9 @@ static SDValue getMemcpyLoadsAndStores(S
}
Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
if (Value.getNode()) {
- Store = DAG.getStore(Chain, dl, Value,
- DAG.getMemBasePlusOffset(Dst, DstOff, dl),
- DstPtrInfo.getWithOffset(DstOff), Align,
- MMOFlags);
+ Store = DAG.getStore(
+ Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
+ DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
OutChains.push_back(Store);
}
}
@@ -5887,7 +5886,7 @@ static SDValue getMemcpyLoadsAndStores(S
Store = DAG.getTruncStore(
Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
- DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
+ DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
OutStoreChains.push_back(Store);
}
SrcOff += VTSize;
Modified: llvm/trunk/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp (original)
+++ llvm/trunk/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp Fri Sep 27 05:54:21 2019
@@ -729,17 +729,17 @@ Error RuntimeDyldImpl::emitCommonSymbols
// Assign the address of each symbol
for (auto &Sym : SymbolsToAllocate) {
- uint32_t Align = Sym.getAlignment();
+ uint32_t Alignment = Sym.getAlignment();
uint64_t Size = Sym.getCommonSize();
StringRef Name;
if (auto NameOrErr = Sym.getName())
Name = *NameOrErr;
else
return NameOrErr.takeError();
- if (Align) {
+ if (Alignment) {
// This symbol has an alignment requirement.
uint64_t AlignOffset =
- offsetToAlignment((uint64_t)Addr, llvm::Align(Align));
+ offsetToAlignment((uint64_t)Addr, Align(Alignment));
Addr += AlignOffset;
Offset += AlignOffset;
}
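[Editorial note, not part of the patch] offsetToAlignment(Value, Align) in the hunk above is assumed to return the number of padding bytes needed to raise Value to the next multiple of the alignment, and zero when Value is already aligned. A self-contained sketch of that arithmetic for power-of-two alignments, using a made-up helper name rather than the real LLVM function:

#include <cassert>
#include <cstdint>

// Padding needed to raise Value to the next multiple of a power-of-two
// alignment; 0 if Value is already aligned.
static uint64_t paddingToAlignment(uint64_t Value, uint64_t Alignment) {
  assert(Alignment != 0 && (Alignment & (Alignment - 1)) == 0 &&
         "alignment must be a power of two");
  return (Alignment - (Value & (Alignment - 1))) & (Alignment - 1);
}

int main() {
  assert(paddingToAlignment(0x1003, 8) == 5); // 0x1003 -> 0x1008
  assert(paddingToAlignment(0x1008, 8) == 0); // already aligned
  return 0;
}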
Modified: llvm/trunk/lib/IR/DataLayout.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/DataLayout.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/IR/DataLayout.cpp (original)
+++ llvm/trunk/lib/IR/DataLayout.cpp Fri Sep 27 05:54:21 2019
@@ -51,7 +51,7 @@ StructLayout::StructLayout(StructType *S
// Loop over each of the elements, placing them in memory.
for (unsigned i = 0, e = NumElements; i != e; ++i) {
Type *Ty = ST->getElementType(i);
- const llvm::Align TyAlign(ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty));
+ const Align TyAlign(ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty));
// Add padding if necessary to align the data element properly.
if (!isAligned(TyAlign, StructSize)) {
@@ -98,10 +98,8 @@ unsigned StructLayout::getElementContain
// LayoutAlignElem, LayoutAlign support
//===----------------------------------------------------------------------===//
-LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type,
- llvm::Align abi_align,
- llvm::Align pref_align,
- uint32_t bit_width) {
+LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width) {
assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
LayoutAlignElem retval;
retval.AlignType = align_type;
@@ -123,10 +121,8 @@ LayoutAlignElem::operator==(const Layout
// PointerAlignElem, PointerAlign support
//===----------------------------------------------------------------------===//
-PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace,
- llvm::Align ABIAlign,
- llvm::Align PrefAlign,
- uint32_t TypeByteWidth,
+PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeByteWidth,
uint32_t IndexWidth) {
assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
PointerAlignElem retval;
@@ -160,19 +156,18 @@ const char *DataLayout::getManglingCompo
}
static const LayoutAlignElem DefaultAlignments[] = {
- {INTEGER_ALIGN, 1, llvm::Align(1), llvm::Align(1)}, // i1
- {INTEGER_ALIGN, 8, llvm::Align(1), llvm::Align(1)}, // i8
- {INTEGER_ALIGN, 16, llvm::Align(2), llvm::Align(2)}, // i16
- {INTEGER_ALIGN, 32, llvm::Align(4), llvm::Align(4)}, // i32
- {INTEGER_ALIGN, 64, llvm::Align(4), llvm::Align(8)}, // i64
- {FLOAT_ALIGN, 16, llvm::Align(2), llvm::Align(2)}, // half
- {FLOAT_ALIGN, 32, llvm::Align(4), llvm::Align(4)}, // float
- {FLOAT_ALIGN, 64, llvm::Align(8), llvm::Align(8)}, // double
- {FLOAT_ALIGN, 128, llvm::Align(16), llvm::Align(16)}, // ppcf128, quad, ...
- {VECTOR_ALIGN, 64, llvm::Align(8), llvm::Align(8)}, // v2i32, v1i64, ...
- {VECTOR_ALIGN, 128, llvm::Align(16),
- llvm::Align(16)}, // v16i8, v8i16, v4i32, ...
- {AGGREGATE_ALIGN, 0, llvm::Align(1), llvm::Align(8)} // struct
+ {INTEGER_ALIGN, 1, Align(1), Align(1)}, // i1
+ {INTEGER_ALIGN, 8, Align(1), Align(1)}, // i8
+ {INTEGER_ALIGN, 16, Align(2), Align(2)}, // i16
+ {INTEGER_ALIGN, 32, Align(4), Align(4)}, // i32
+ {INTEGER_ALIGN, 64, Align(4), Align(8)}, // i64
+ {FLOAT_ALIGN, 16, Align(2), Align(2)}, // half
+ {FLOAT_ALIGN, 32, Align(4), Align(4)}, // float
+ {FLOAT_ALIGN, 64, Align(8), Align(8)}, // double
+ {FLOAT_ALIGN, 128, Align(16), Align(16)}, // ppcf128, quad, ...
+ {VECTOR_ALIGN, 64, Align(8), Align(8)}, // v2i32, v1i64, ...
+ {VECTOR_ALIGN, 128, Align(16), Align(16)}, // v16i8, v8i16, v4i32, ...
+ {AGGREGATE_ALIGN, 0, Align(1), Align(8)} // struct
};
void DataLayout::reset(StringRef Desc) {
@@ -193,7 +188,7 @@ void DataLayout::reset(StringRef Desc) {
setAlignment((AlignTypeEnum)E.AlignType, E.ABIAlign, E.PrefAlign,
E.TypeBitWidth);
}
- setPointerAlignment(0, llvm::Align(8), llvm::Align(8), 8, 8);
+ setPointerAlignment(0, Align(8), Align(8), 8, 8);
parseSpecifier(Desc);
}
@@ -486,8 +481,8 @@ DataLayout::findAlignmentLowerBound(Alig
});
}
-void DataLayout::setAlignment(AlignTypeEnum align_type, llvm::Align abi_align,
- llvm::Align pref_align, uint32_t bit_width) {
+void DataLayout::setAlignment(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width) {
// AlignmentsTy::ABIAlign and AlignmentsTy::PrefAlign were once stored as
// uint16_t, it is unclear if there are requirements for alignment to be less
// than 2^16 other than storage. In the meantime we leave the restriction as
@@ -520,9 +515,8 @@ DataLayout::findPointerLowerBound(uint32
});
}
-void DataLayout::setPointerAlignment(uint32_t AddrSpace, llvm::Align ABIAlign,
- llvm::Align PrefAlign,
- uint32_t TypeByteWidth,
+void DataLayout::setPointerAlignment(uint32_t AddrSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeByteWidth,
uint32_t IndexWidth) {
if (PrefAlign < ABIAlign)
report_fatal_error(
@@ -542,9 +536,8 @@ void DataLayout::setPointerAlignment(uin
/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
/// preferred if ABIInfo = false) the layout wants for the specified datatype.
-llvm::Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
- uint32_t BitWidth, bool ABIInfo,
- Type *Ty) const {
+Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType, uint32_t BitWidth,
+ bool ABIInfo, Type *Ty) const {
AlignmentsTy::const_iterator I = findAlignmentLowerBound(AlignType, BitWidth);
// See if we found an exact match. Of if we are looking for an integer type,
// but don't have an exact match take the next largest integer. This is where
@@ -563,10 +556,11 @@ llvm::Align DataLayout::getAlignmentInfo
} else if (AlignType == VECTOR_ALIGN) {
// By default, use natural alignment for vector types. This is consistent
// with what clang and llvm-gcc do.
- unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
- Align *= cast<VectorType>(Ty)->getNumElements();
- Align = PowerOf2Ceil(Align);
- return llvm::Align(Align);
+ unsigned Alignment =
+ getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+ Alignment *= cast<VectorType>(Ty)->getNumElements();
+ Alignment = PowerOf2Ceil(Alignment);
+ return Align(Alignment);
}
// If we still couldn't find a reasonable default alignment, fall back
@@ -575,9 +569,9 @@ llvm::Align DataLayout::getAlignmentInfo
// approximation of reality, and if the user wanted something less
// less conservative, they should have specified it explicitly in the data
// layout.
- unsigned Align = getTypeStoreSize(Ty);
- Align = PowerOf2Ceil(Align);
- return llvm::Align(Align);
+ unsigned Alignment = getTypeStoreSize(Ty);
+ Alignment = PowerOf2Ceil(Alignment);
+ return Align(Alignment);
}
namespace {
@@ -638,7 +632,7 @@ const StructLayout *DataLayout::getStruc
return L;
}
-llvm::Align DataLayout::getPointerABIAlignment(unsigned AS) const {
+Align DataLayout::getPointerABIAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
if (I == Pointers.end() || I->AddressSpace != AS) {
I = findPointerLowerBound(0);
@@ -647,7 +641,7 @@ llvm::Align DataLayout::getPointerABIAli
return I->ABIAlign;
}
-llvm::Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
+Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
if (I == Pointers.end() || I->AddressSpace != AS) {
I = findPointerLowerBound(0);
@@ -704,7 +698,7 @@ unsigned DataLayout::getIndexTypeSizeInB
Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
== false) for the requested type \a Ty.
*/
-llvm::Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
+Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
AlignTypeEnum AlignType;
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
@@ -723,12 +717,11 @@ llvm::Align DataLayout::getAlignment(Typ
case Type::StructTyID: {
// Packed structure types always have an ABI alignment of one.
if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
- return llvm::Align::None();
+ return Align::None();
// Get the layout annotation... which is lazily created on demand.
const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
- const llvm::Align Align =
- getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
+ const Align Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
return std::max(Align, Layout->getAlignment());
}
case Type::IntegerTyID:
@@ -761,7 +754,7 @@ unsigned DataLayout::getABITypeAlignment
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
-llvm::Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
+Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr);
}
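[Editorial note, not part of the patch] The VECTOR_ALIGN fallback in the getAlignmentInfo hunk above computes a vector type's natural alignment: element allocation size times element count, rounded up to the next power of two. A standalone sketch of that arithmetic with plain integers standing in for the DataLayout/Type machinery:

#include <cassert>
#include <cstdint>

// Round V up to the next power of two (assumes V > 0 here).
static uint64_t powerOf2Ceil(uint64_t V) {
  uint64_t P = 1;
  while (P < V)
    P <<= 1;
  return P;
}

// Natural alignment of a vector: element alloc size * number of elements,
// rounded up to a power of two.
static uint64_t naturalVectorAlignment(uint64_t ElemAllocSize, uint64_t NumElems) {
  return powerOf2Ceil(ElemAllocSize * NumElems);
}

int main() {
  assert(naturalVectorAlignment(4, 3) == 16); // e.g. a 3 x i32 vector
  assert(naturalVectorAlignment(4, 4) == 16); // v4i32
  return 0;
}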
Modified: llvm/trunk/lib/IR/Instructions.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/Instructions.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/IR/Instructions.cpp (original)
+++ llvm/trunk/lib/IR/Instructions.cpp Fri Sep 27 05:54:21 2019
@@ -1248,7 +1248,7 @@ void AllocaInst::setAlignment(unsigned A
setAlignment(llvm::MaybeAlign(Align));
}
-void AllocaInst::setAlignment(llvm::MaybeAlign Align) {
+void AllocaInst::setAlignment(MaybeAlign Align) {
assert((!Align || *Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
@@ -1343,7 +1343,7 @@ void LoadInst::setAlignment(unsigned Ali
setAlignment(llvm::MaybeAlign(Align));
}
-void LoadInst::setAlignment(llvm::MaybeAlign Align) {
+void LoadInst::setAlignment(MaybeAlign Align) {
assert((!Align || *Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
@@ -1430,7 +1430,7 @@ void StoreInst::setAlignment(unsigned Al
setAlignment(llvm::MaybeAlign(Align));
}
-void StoreInst::setAlignment(llvm::MaybeAlign Align) {
+void StoreInst::setAlignment(MaybeAlign Align) {
assert((!Align || *Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
Modified: llvm/trunk/lib/IR/Value.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/Value.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/IR/Value.cpp (original)
+++ llvm/trunk/lib/IR/Value.cpp Fri Sep 27 05:54:21 2019
@@ -667,7 +667,7 @@ unsigned Value::getPointerAlignment(cons
assert(getType()->isPointerTy() && "must be pointer");
if (auto *GO = dyn_cast<GlobalObject>(this)) {
if (isa<Function>(GO)) {
- const llvm::MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
+ const MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
const unsigned Align = FunctionPtrAlign ? FunctionPtrAlign->value() : 0;
switch (DL.getFunctionPtrAlignType()) {
case DataLayout::FunctionPtrAlignType::Independent:
Modified: llvm/trunk/lib/MC/ELFObjectWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/MC/ELFObjectWriter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/MC/ELFObjectWriter.cpp (original)
+++ llvm/trunk/lib/MC/ELFObjectWriter.cpp Fri Sep 27 05:54:21 2019
@@ -337,7 +337,7 @@ public:
} // end anonymous namespace
void ELFWriter::align(unsigned Alignment) {
- uint64_t Padding = offsetToAlignment(W.OS.tell(), llvm::Align(Alignment));
+ uint64_t Padding = offsetToAlignment(W.OS.tell(), Align(Alignment));
W.OS.write_zeros(Padding);
}
@@ -638,7 +638,7 @@ void ELFWriter::computeSymbolTable(
unsigned EntrySize = is64Bit() ? ELF::SYMENTRY_SIZE64 : ELF::SYMENTRY_SIZE32;
MCSectionELF *SymtabSection =
Ctx.getELFSection(".symtab", ELF::SHT_SYMTAB, 0, EntrySize, "");
- SymtabSection->setAlignment(is64Bit() ? llvm::Align(8) : llvm::Align(4));
+ SymtabSection->setAlignment(is64Bit() ? Align(8) : Align(4));
SymbolTableIndex = addToSectionTable(SymtabSection);
align(SymtabSection->getAlignment());
@@ -736,7 +736,7 @@ void ELFWriter::computeSymbolTable(
MCSectionELF *SymtabShndxSection =
Ctx.getELFSection(".symtab_shndx", ELF::SHT_SYMTAB_SHNDX, 0, 4, "");
SymtabShndxSectionIndex = addToSectionTable(SymtabShndxSection);
- SymtabShndxSection->setAlignment(llvm::Align(4));
+ SymtabShndxSection->setAlignment(Align(4));
}
ArrayRef<std::string> FileNames = Asm.getFileNames();
@@ -824,7 +824,7 @@ MCSectionELF *ELFWriter::createRelocatio
MCSectionELF *RelaSection = Ctx.createELFRelSection(
RelaSectionName, hasRelocationAddend() ? ELF::SHT_RELA : ELF::SHT_REL,
Flags, EntrySize, Sec.getGroup(), &Sec);
- RelaSection->setAlignment(is64Bit() ? llvm::Align(8) : llvm::Align(4));
+ RelaSection->setAlignment(is64Bit() ? Align(8) : Align(4));
return RelaSection;
}
@@ -911,7 +911,7 @@ void ELFWriter::writeSectionData(const M
Section.setFlags(Section.getFlags() | ELF::SHF_COMPRESSED);
// Alignment field should reflect the requirements of
// the compressed section header.
- Section.setAlignment(is64Bit() ? llvm::Align(8) : llvm::Align(4));
+ Section.setAlignment(is64Bit() ? Align(8) : Align(4));
} else {
// Add "z" prefix to section name. This is zlib-gnu style.
MC.renameELFSection(&Section, (".z" + SectionName.drop_front(1)).str());
@@ -1135,7 +1135,7 @@ uint64_t ELFWriter::writeObject(MCAssemb
if (!GroupIdx) {
MCSectionELF *Group = Ctx.createELFGroupSection(SignatureSymbol);
GroupIdx = addToSectionTable(Group);
- Group->setAlignment(llvm::Align(4));
+ Group->setAlignment(Align(4));
Groups.push_back(Group);
}
std::vector<const MCSectionELF *> &Members =
Modified: llvm/trunk/lib/MC/MCAssembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/MC/MCAssembler.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/MC/MCAssembler.cpp (original)
+++ llvm/trunk/lib/MC/MCAssembler.cpp Fri Sep 27 05:54:21 2019
@@ -322,7 +322,7 @@ uint64_t MCAssembler::computeFragmentSiz
case MCFragment::FT_Align: {
const MCAlignFragment &AF = cast<MCAlignFragment>(F);
unsigned Offset = Layout.getFragmentOffset(&AF);
- unsigned Size = offsetToAlignment(Offset, llvm::Align(AF.getAlignment()));
+ unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment()));
// Insert extra Nops for code alignment if the target define
// shouldInsertExtraNopBytesForCodeAlign target hook.
Modified: llvm/trunk/lib/MC/MCELFStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/MC/MCELFStreamer.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/MC/MCELFStreamer.cpp (original)
+++ llvm/trunk/lib/MC/MCELFStreamer.cpp Fri Sep 27 05:54:21 2019
@@ -139,7 +139,7 @@ static void setSectionAlignmentForBundli
MCSection *Section) {
if (Section && Assembler.isBundlingEnabled() && Section->hasInstructions() &&
Section->getAlignment() < Assembler.getBundleAlignSize())
- Section->setAlignment(llvm::Align(Assembler.getBundleAlignSize()));
+ Section->setAlignment(Align(Assembler.getBundleAlignSize()));
}
void MCELFStreamer::ChangeSection(MCSection *Section,
@@ -309,7 +309,7 @@ void MCELFStreamer::EmitCommonSymbol(MCS
// Update the maximum alignment of the section if necessary.
if (ByteAlignment > Section.getAlignment())
- Section.setAlignment(llvm::Align(ByteAlignment));
+ Section.setAlignment(Align(ByteAlignment));
SwitchSection(P.first, P.second);
} else {
Modified: llvm/trunk/lib/MC/MCObjectStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/MC/MCObjectStreamer.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/MC/MCObjectStreamer.cpp (original)
+++ llvm/trunk/lib/MC/MCObjectStreamer.cpp Fri Sep 27 05:54:21 2019
@@ -539,7 +539,7 @@ void MCObjectStreamer::EmitValueToAlignm
// Update the maximum alignment on the current section if necessary.
MCSection *CurSec = getCurrentSectionOnly();
if (ByteAlignment > CurSec->getAlignment())
- CurSec->setAlignment(llvm::Align(ByteAlignment));
+ CurSec->setAlignment(Align(ByteAlignment));
}
void MCObjectStreamer::EmitCodeAlignment(unsigned ByteAlignment,
Modified: llvm/trunk/lib/MC/MCWinCOFFStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/MC/MCWinCOFFStreamer.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/MC/MCWinCOFFStreamer.cpp (original)
+++ llvm/trunk/lib/MC/MCWinCOFFStreamer.cpp Fri Sep 27 05:54:21 2019
@@ -192,7 +192,7 @@ void MCWinCOFFStreamer::EmitCOFFSafeSEH(
MCSection *SXData = getContext().getObjectFileInfo()->getSXDataSection();
getAssembler().registerSection(*SXData);
if (SXData->getAlignment() < 4)
- SXData->setAlignment(llvm::Align(4));
+ SXData->setAlignment(Align(4));
new MCSymbolIdFragment(Symbol, SXData);
@@ -209,7 +209,7 @@ void MCWinCOFFStreamer::EmitCOFFSymbolIn
MCSection *Sec = getCurrentSectionOnly();
getAssembler().registerSection(*Sec);
if (Sec->getAlignment() < 4)
- Sec->setAlignment(llvm::Align(4));
+ Sec->setAlignment(Align(4));
new MCSymbolIdFragment(Symbol, getCurrentSectionOnly());
Modified: llvm/trunk/lib/MC/MachObjectWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/MC/MachObjectWriter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/MC/MachObjectWriter.cpp (original)
+++ llvm/trunk/lib/MC/MachObjectWriter.cpp Fri Sep 27 05:54:21 2019
@@ -127,7 +127,7 @@ uint64_t MachObjectWriter::getPaddingSiz
const MCSection &NextSec = *Layout.getSectionOrder()[Next];
if (NextSec.isVirtualSection())
return 0;
- return offsetToAlignment(EndAddr, llvm::Align(NextSec.getAlignment()));
+ return offsetToAlignment(EndAddr, Align(NextSec.getAlignment()));
}
void MachObjectWriter::writeHeader(MachO::HeaderFileType Type,
@@ -445,8 +445,8 @@ void MachObjectWriter::writeLinkerOption
}
// Pad to a multiple of the pointer size.
- W.OS.write_zeros(offsetToAlignment(BytesWritten, is64Bit() ? llvm::Align(8)
- : llvm::Align(4)));
+ W.OS.write_zeros(
+ offsetToAlignment(BytesWritten, is64Bit() ? Align(8) : Align(4)));
assert(W.OS.tell() - Start == Size);
}
@@ -835,7 +835,7 @@ uint64_t MachObjectWriter::writeObject(M
//
// FIXME: Is this machine dependent?
unsigned SectionDataPadding =
- offsetToAlignment(SectionDataFileSize, llvm::Align(4));
+ offsetToAlignment(SectionDataFileSize, Align(4));
SectionDataFileSize += SectionDataPadding;
// Write the prolog, starting with the header and load command...
@@ -1000,8 +1000,8 @@ uint64_t MachObjectWriter::writeObject(M
#endif
Asm.getLOHContainer().emit(*this, Layout);
// Pad to a multiple of the pointer size.
- W.OS.write_zeros(offsetToAlignment(LOHRawSize, is64Bit() ? llvm::Align(8)
- : llvm::Align(4)));
+ W.OS.write_zeros(
+ offsetToAlignment(LOHRawSize, is64Bit() ? Align(8) : Align(4)));
assert(W.OS.tell() - Start == LOHSize);
}
Modified: llvm/trunk/lib/Object/ArchiveWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Object/ArchiveWriter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Object/ArchiveWriter.cpp (original)
+++ llvm/trunk/lib/Object/ArchiveWriter.cpp Fri Sep 27 05:54:21 2019
@@ -177,7 +177,7 @@ printBSDMemberHeader(raw_ostream &Out, u
unsigned UID, unsigned GID, unsigned Perms, uint64_t Size) {
uint64_t PosAfterHeader = Pos + 60 + Name.size();
// Pad so that even 64 bit object files are aligned.
- unsigned Pad = offsetToAlignment(PosAfterHeader, llvm::Align(8));
+ unsigned Pad = offsetToAlignment(PosAfterHeader, Align(8));
unsigned NameWithPadding = Name.size() + Pad;
printWithSpacePadding(Out, Twine("#1/") + Twine(NameWithPadding), 16);
printRestOfMemberHeader(Out, ModTime, UID, GID, Perms,
@@ -244,7 +244,7 @@ struct MemberData {
static MemberData computeStringTable(StringRef Names) {
unsigned Size = Names.size();
- unsigned Pad = offsetToAlignment(Size, llvm::Align(2));
+ unsigned Pad = offsetToAlignment(Size, Align(2));
std::string Header;
raw_string_ostream Out(Header);
printWithSpacePadding(Out, "//", 48);
@@ -308,7 +308,7 @@ static void writeSymbolTable(raw_ostream
// least 4-byte aligned for 32-bit content. Opt for the larger encoding
// uniformly.
// We do this for all bsd formats because it simplifies aligning members.
- const llvm::Align Alignment(isBSDLike(Kind) ? 8 : 2);
+ const Align Alignment(isBSDLike(Kind) ? 8 : 2);
unsigned Pad = offsetToAlignment(Size, Alignment);
Size += Pad;
@@ -465,9 +465,9 @@ computeMemberData(raw_ostream &StringTab
// uniformly. This matches the behaviour with cctools and ensures that ld64
// is happy with archives that we generate.
unsigned MemberPadding =
- isDarwin(Kind) ? offsetToAlignment(Data.size(), llvm::Align(8)) : 0;
+ isDarwin(Kind) ? offsetToAlignment(Data.size(), Align(8)) : 0;
unsigned TailPadding =
- offsetToAlignment(Data.size() + MemberPadding, llvm::Align(2));
+ offsetToAlignment(Data.size() + MemberPadding, Align(2));
StringRef Padding = StringRef(PaddingData, MemberPadding + TailPadding);
sys::TimePoint<std::chrono::seconds> ModTime;
Modified: llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -711,7 +711,7 @@ void AArch64AsmPrinter::EmitJumpTableInf
if (JTBBs.empty()) continue;
unsigned Size = AFI->getJumpTableEntrySize(JTI);
- EmitAlignment(llvm::Align(Size));
+ EmitAlignment(Align(Size));
OutStreamer->EmitLabel(GetJTISymbol(JTI));
for (auto *JTBB : JTBBs)
Modified: llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.cpp Fri Sep 27 05:54:21 2019
@@ -40,10 +40,10 @@ static bool finishStackBlock(SmallVector
MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
CCState &State, unsigned SlotAlign) {
unsigned Size = LocVT.getSizeInBits() / 8;
- const llvm::Align StackAlign =
+ const Align StackAlign =
State.getMachineFunction().getDataLayout().getStackAlignment();
- const llvm::Align OrigAlign(ArgFlags.getOrigAlign());
- const llvm::Align Align = std::min(OrigAlign, StackAlign);
+ const Align OrigAlign(ArgFlags.getOrigAlign());
+ const Align Align = std::min(OrigAlign, StackAlign);
for (auto &It : PendingMembers) {
It.convertToMem(State.AllocateStack(
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -641,11 +641,10 @@ AArch64TargetLowering::AArch64TargetLowe
EnableExtLdPromotion = true;
// Set required alignment.
- setMinFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(4));
// Set preferred alignments.
- setPrefLoopAlignment(llvm::Align(1ULL << STI.getPrefLoopLogAlignment()));
- setPrefFunctionAlignment(
- llvm::Align(1ULL << STI.getPrefFunctionLogAlignment()));
+ setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment()));
+ setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment()));
// Only change the limit for entries in a jump table if specified by
// the sub target, but not at the command line.
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -229,7 +229,7 @@ void AMDGPUAsmPrinter::EmitFunctionBodyE
// alignment.
Streamer.EmitValueToAlignment(64, 0, 1, 0);
if (ReadOnlySection.getAlignment() < 64)
- ReadOnlySection.setAlignment(llvm::Align(64));
+ ReadOnlySection.setAlignment(Align(64));
const MCSubtargetInfo &STI = MF->getSubtarget();
@@ -417,7 +417,7 @@ bool AMDGPUAsmPrinter::runOnMachineFunct
// The starting address of all shader programs must be 256 bytes aligned.
// Regular functions just need the basic required instruction alignment.
- MF.setAlignment(MFI->isEntryFunction() ? llvm::Align(256) : llvm::Align(4));
+ MF.setAlignment(MFI->isEntryFunction() ? Align(256) : Align(4));
SetupMachineFunction(MF);
Modified: llvm/trunk/lib/Target/AMDGPU/R600AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600AsmPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/R600AsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/R600AsmPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -104,7 +104,7 @@ bool R600AsmPrinter::runOnMachineFunctio
// Functions needs to be cacheline (256B) aligned.
- MF.ensureAlignment(llvm::Align(256));
+ MF.ensureAlignment(Align(256));
SetupMachineFunction(MF);
Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -10684,9 +10684,9 @@ void SITargetLowering::computeKnownBitsF
Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
}
-llvm::Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
- const llvm::Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
- const llvm::Align CacheLineAlign = llvm::Align(64);
+Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+ const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
+ const Align CacheLineAlign = Align(64);
// Pre-GFX10 target did not benefit from loop alignment
if (!ML || DisableLoopAlignment ||
Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h Fri Sep 27 05:54:21 2019
@@ -384,7 +384,7 @@ public:
unsigned Depth = 0) const override;
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
- llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;
+ Align getPrefLoopAlignment(MachineLoop *ML) const override;
void allocateHSAUserSGPRs(CCState &CCInfo,
MachineFunction &MF,
Modified: llvm/trunk/lib/Target/ARC/ARCMachineFunctionInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARC/ARCMachineFunctionInfo.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARC/ARCMachineFunctionInfo.h (original)
+++ llvm/trunk/lib/Target/ARC/ARCMachineFunctionInfo.h Fri Sep 27 05:54:21 2019
@@ -35,7 +35,7 @@ public:
: ReturnStackOffsetSet(false), VarArgsFrameIndex(0),
ReturnStackOffset(-1U), MaxCallStackReq(0) {
// Functions are 4-byte aligned.
- MF.setAlignment(llvm::Align(4));
+ MF.setAlignment(Align(4));
}
~ARCFunctionInfo() {}
Modified: llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMAsmPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -168,7 +168,7 @@ bool ARMAsmPrinter::runOnMachineFunction
// relatively easy to exceed the thumb branch range within a TU.
if (! ThumbIndirectPads.empty()) {
OutStreamer->EmitAssemblerFlag(MCAF_Code16);
- EmitAlignment(llvm::Align(2));
+ EmitAlignment(Align(2));
for (std::pair<unsigned, MCSymbol *> &TIP : ThumbIndirectPads) {
OutStreamer->EmitLabel(TIP.second);
EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tBX)
@@ -526,7 +526,7 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Mod
if (!Stubs.empty()) {
// Switch with ".non_lazy_symbol_pointer" directive.
OutStreamer->SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
for (auto &Stub : Stubs)
emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second);
@@ -539,7 +539,7 @@ void ARMAsmPrinter::EmitEndOfAsmFile(Mod
if (!Stubs.empty()) {
// Switch with ".non_lazy_symbol_pointer" directive.
OutStreamer->SwitchSection(TLOFMacho.getThreadLocalPointerSection());
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
for (auto &Stub : Stubs)
emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second);
@@ -940,7 +940,7 @@ void ARMAsmPrinter::EmitJumpTableAddrs(c
// Make sure the Thumb jump table is 4-byte aligned. This will be a nop for
// ARM mode tables.
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
// Emit a label for the jump table.
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
@@ -986,7 +986,7 @@ void ARMAsmPrinter::EmitJumpTableInsts(c
// Make sure the Thumb jump table is 4-byte aligned. This will be a nop for
// ARM mode tables.
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
// Emit a label for the jump table.
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
@@ -1015,7 +1015,7 @@ void ARMAsmPrinter::EmitJumpTableTBInst(
unsigned JTI = MO1.getIndex();
if (Subtarget->isThumb1Only())
- EmitAlignment(llvm::Align(4));
+ EmitAlignment(Align(4));
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
OutStreamer->EmitLabel(JTISymbol);
@@ -1058,7 +1058,7 @@ void ARMAsmPrinter::EmitJumpTableTBInst(
OutStreamer->EmitDataRegion(MCDR_DataRegionEnd);
// Make sure the next instruction is 2-byte aligned.
- EmitAlignment(llvm::Align(2));
+ EmitAlignment(Align(2));
}
void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
Modified: llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.cpp Fri Sep 27 05:54:21 2019
@@ -47,7 +47,7 @@ void ARMBasicBlockUtils::computeBlockSiz
BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
BBI.Size = 0;
BBI.Unalign = 0;
- BBI.PostAlign = llvm::Align::None();
+ BBI.PostAlign = Align::None();
for (MachineInstr &I : *MBB) {
BBI.Size += TII->getInstSizeInBytes(I);
@@ -62,8 +62,8 @@ void ARMBasicBlockUtils::computeBlockSiz
// tBR_JTr contains a .align 2 directive.
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
- BBI.PostAlign = llvm::Align(4);
- MBB->getParent()->ensureAlignment(llvm::Align(4));
+ BBI.PostAlign = Align(4);
+ MBB->getParent()->ensureAlignment(Align(4));
}
}
@@ -126,7 +126,7 @@ void ARMBasicBlockUtils::adjustBBOffsets
for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) {
// Get the offset and known bits at the end of the layout predecessor.
// Include the alignment of the current block.
- const llvm::Align Align = MF.getBlockNumbered(i)->getAlignment();
+ const Align Align = MF.getBlockNumbered(i)->getAlignment();
const unsigned Offset = BBInfo[i - 1].postOffset(Align);
const unsigned KnownBits = BBInfo[i - 1].postKnownBits(Align);
Modified: llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.h (original)
+++ llvm/trunk/lib/Target/ARM/ARMBasicBlockInfo.h Fri Sep 27 05:54:21 2019
@@ -27,11 +27,11 @@ using BBInfoVector = SmallVectorImpl<Bas
/// unknown offset bits. This does not include alignment padding caused by
/// known offset bits.
///
-/// @param Align alignment
+/// @param Alignment alignment
/// @param KnownBits Number of known low offset bits.
-inline unsigned UnknownPadding(llvm::Align Align, unsigned KnownBits) {
- if (KnownBits < Log2(Align))
- return Align.value() - (1ull << KnownBits);
+inline unsigned UnknownPadding(Align Alignment, unsigned KnownBits) {
+ if (KnownBits < Log2(Alignment))
+ return Alignment.value() - (1ull << KnownBits);
return 0;
}
@@ -67,7 +67,7 @@ struct BasicBlockInfo {
/// PostAlign - When > 1, the block terminator contains a .align
/// directive, so the end of the block is aligned to PostAlign bytes.
- llvm::Align PostAlign;
+ Align PostAlign;
BasicBlockInfo() = default;
@@ -86,10 +86,10 @@ struct BasicBlockInfo {
/// Compute the offset immediately following this block. If Align is
/// specified, return the offset the successor block will get if it has
/// this alignment.
- unsigned postOffset(llvm::Align Align = llvm::Align::None()) const {
+ unsigned postOffset(Align Alignment = Align::None()) const {
unsigned PO = Offset + Size;
- const llvm::Align PA = std::max(PostAlign, Align);
- if (PA == llvm::Align::None())
+ const Align PA = std::max(PostAlign, Alignment);
+ if (PA == Align::None())
return PO;
// Add alignment padding from the terminator.
return PO + UnknownPadding(PA, internalKnownBits());
@@ -100,7 +100,7 @@ struct BasicBlockInfo {
/// instruction alignment. An aligned terminator may increase the number
/// of know bits.
/// If LogAlign is given, also consider the alignment of the next block.
- unsigned postKnownBits(llvm::Align Align = llvm::Align::None()) const {
+ unsigned postKnownBits(Align Align = Align::None()) const {
return std::max(Log2(std::max(PostAlign, Align)), internalKnownBits());
}
};
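[Editorial note, not part of the patch] UnknownPadding in the hunk above is the worst-case padding a terminator's .align directive may insert when only the low KnownBits bits of the offset are known: if fewer bits are known than the alignment requires, up to Alignment - 2^KnownBits bytes can appear. A self-contained sketch of the same computation, with the alignment given directly in bytes instead of as an Align value:

#include <cassert>
#include <cstdint>

// Worst-case alignment padding when only the low KnownBits bits of the
// offset are known; AlignmentBytes must be a power of two.
static uint64_t unknownPadding(uint64_t AlignmentBytes, unsigned KnownBits) {
  if ((1ULL << KnownBits) < AlignmentBytes)
    return AlignmentBytes - (1ULL << KnownBits);
  return 0;
}

int main() {
  assert(unknownPadding(4, 1) == 2); // 4-byte align, offset known mod 2
  assert(unknownPadding(4, 2) == 0); // offset already known mod 4
  return 0;
}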
Modified: llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp Fri Sep 27 05:54:21 2019
@@ -247,7 +247,7 @@ namespace {
void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
bool BBHasFallthrough(MachineBasicBlock *MBB);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
- llvm::Align getCPEAlign(const MachineInstr *CPEMI);
+ Align getCPEAlign(const MachineInstr *CPEMI);
void scanFunctionJumpTables();
void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
@@ -404,7 +404,7 @@ bool ARMConstantIslands::runOnMachineFun
// Functions with jump tables need an alignment of 4 because they use the ADR
// instruction, which aligns the PC to 4 bytes before adding an offset.
if (!T2JumpTables.empty())
- MF->ensureAlignment(llvm::Align(4));
+ MF->ensureAlignment(Align(4));
/// Remove dead constant pool entries.
MadeChange |= removeUnusedCPEntries();
@@ -494,7 +494,7 @@ ARMConstantIslands::doInitialConstPlacem
MF->push_back(BB);
// MachineConstantPool measures alignment in bytes.
- const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());
+ const Align MaxAlign(MCP->getConstantPoolAlignment());
const unsigned MaxLogAlign = Log2(MaxAlign);
// Mark the basic block as required by the const-pool.
@@ -650,25 +650,25 @@ ARMConstantIslands::findConstPoolEntry(u
/// getCPEAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI.
-llvm::Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
+Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
switch (CPEMI->getOpcode()) {
case ARM::CONSTPOOL_ENTRY:
break;
case ARM::JUMPTABLE_TBB:
- return isThumb1 ? llvm::Align(4) : llvm::Align(1);
+ return isThumb1 ? Align(4) : Align(1);
case ARM::JUMPTABLE_TBH:
- return isThumb1 ? llvm::Align(4) : llvm::Align(2);
+ return isThumb1 ? Align(4) : Align(2);
case ARM::JUMPTABLE_INSTS:
- return llvm::Align(2);
+ return Align(2);
case ARM::JUMPTABLE_ADDRS:
- return llvm::Align(4);
+ return Align(4);
default:
llvm_unreachable("unknown constpool entry kind");
}
unsigned CPI = getCombinedIndex(CPEMI);
assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
- return llvm::Align(MCP->getConstants()[CPI].getAlignment());
+ return Align(MCP->getConstants()[CPI].getAlignment());
}
/// scanFunctionJumpTables - Do a scan of the function, building up
@@ -1021,10 +1021,10 @@ bool ARMConstantIslands::isWaterInRange(
MachineBasicBlock* Water, CPUser &U,
unsigned &Growth) {
BBInfoVector &BBInfo = BBUtils->getBBInfo();
- const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+ const Align CPEAlign = getCPEAlign(U.CPEMI);
const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign);
unsigned NextBlockOffset;
- llvm::Align NextBlockAlignment;
+ Align NextBlockAlignment;
MachineFunction::const_iterator NextBlock = Water->getIterator();
if (++NextBlock == MF->end()) {
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
@@ -1214,7 +1214,7 @@ bool ARMConstantIslands::findAvailableWa
// inserting islands between BB0 and BB1 makes other accesses out of range.
MachineBasicBlock *UserBB = U.MI->getParent();
BBInfoVector &BBInfo = BBUtils->getBBInfo();
- const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+ const Align CPEAlign = getCPEAlign(U.CPEMI);
unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign);
if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
return false;
@@ -1268,7 +1268,7 @@ void ARMConstantIslands::createNewWater(
CPUser &U = CPUsers[CPUserIndex];
MachineInstr *UserMI = U.MI;
MachineInstr *CPEMI = U.CPEMI;
- const llvm::Align CPEAlign = getCPEAlign(CPEMI);
+ const Align CPEAlign = getCPEAlign(CPEMI);
MachineBasicBlock *UserMBB = UserMI->getParent();
BBInfoVector &BBInfo = BBUtils->getBBInfo();
const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1323,7 +1323,7 @@ void ARMConstantIslands::createNewWater(
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// Align which is the largest possible alignment in the function.
- const llvm::Align Align = MF->getAlignment();
+ const Align Align = MF->getAlignment();
assert(Align >= CPEAlign && "Over-aligned constant pool entry");
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(Align, KnownBits);
@@ -1501,9 +1501,9 @@ bool ARMConstantIslands::handleConstantP
// Always align the new block because CP entries can be smaller than 4
// bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
// be an already aligned constant pool block.
- const llvm::Align Align = isThumb ? llvm::Align(2) : llvm::Align(4);
- if (NewMBB->getAlignment() < Align)
- NewMBB->setAlignment(Align);
+ const Align Alignment = isThumb ? Align(2) : Align(4);
+ if (NewMBB->getAlignment() < Alignment)
+ NewMBB->setAlignment(Alignment);
// Remove the original WaterList entry; we want subsequent insertions in
// this vicinity to go after the one we're about to insert. This
@@ -1566,7 +1566,7 @@ void ARMConstantIslands::removeDeadCPEMI
BBInfo[CPEBB->getNumber()].Size = 0;
// This block no longer needs to be aligned.
- CPEBB->setAlignment(llvm::Align::None());
+ CPEBB->setAlignment(Align::None());
} else {
// Entries are sorted by descending alignment, so realign from the front.
CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin()));
Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -1428,16 +1428,14 @@ ARMTargetLowering::ARMTargetLowering(con
// On ARM arguments smaller than 4 bytes are extended, so all arguments
// are at least 4 bytes aligned.
- setMinStackArgumentAlignment(llvm::Align(4));
+ setMinStackArgumentAlignment(Align(4));
// Prefer likely predicted branches to selects on out-of-order cores.
PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
- setPrefLoopAlignment(
- llvm::Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
+ setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
- setMinFunctionAlignment(Subtarget->isThumb() ? llvm::Align(2)
- : llvm::Align(4));
+ setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));
if (Subtarget->isThumb() || Subtarget->isThumb2())
setTargetDAGCombine(ISD::ABS);
Modified: llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AVR/AVRISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -236,7 +236,7 @@ AVRTargetLowering::AVRTargetLowering(con
setLibcallName(RTLIB::SIN_F32, "sin");
setLibcallName(RTLIB::COS_F32, "cos");
- setMinFunctionAlignment(llvm::Align(2));
+ setMinFunctionAlignment(Align(2));
setMinimumJumpTableEntries(UINT_MAX);
}
Modified: llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/BPF/BPFISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -133,8 +133,8 @@ BPFTargetLowering::BPFTargetLowering(con
setBooleanContents(ZeroOrOneBooleanContent);
// Function alignments
- setMinFunctionAlignment(llvm::Align(8));
- setPrefFunctionAlignment(llvm::Align(8));
+ setMinFunctionAlignment(Align(8));
+ setPrefFunctionAlignment(Align(8));
if (BPFExpandMemcpyInOrder) {
// LLVM generic code will try to expand memcpy into load/store pairs at this
Modified: llvm/trunk/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonBranchRelaxation.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonBranchRelaxation.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonBranchRelaxation.cpp Fri Sep 27 05:54:21 2019
@@ -105,7 +105,7 @@ void HexagonBranchRelaxation::computeOff
// offset of the current instruction from the start.
unsigned InstOffset = 0;
for (auto &B : MF) {
- if (B.getAlignment() != llvm::Align::None()) {
+ if (B.getAlignment() != Align::None()) {
// Although we don't know the exact layout of the final code, we need
// to account for alignment padding somehow. This heuristic pads each
// aligned basic block according to the alignment value.
Modified: llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonFixupHwLoops.cpp Fri Sep 27 05:54:21 2019
@@ -114,7 +114,7 @@ bool HexagonFixupHwLoops::fixupLoopInstr
// First pass - compute the offset of each basic block.
for (const MachineBasicBlock &MBB : MF) {
- if (MBB.getAlignment() != llvm::Align::None()) {
+ if (MBB.getAlignment() != Align::None()) {
// Although we don't know the exact layout of the final code, we need
// to account for alignment padding somehow. This heuristic pads each
// aligned basic block according to the alignment value.
Modified: llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp Fri Sep 27 05:54:21 2019
@@ -1380,7 +1380,7 @@ void HexagonFrameLowering::processFuncti
Align A = MFI.getLocalFrameMaxAlign();
assert(A <= 8 && "Unexpected local frame alignment");
if (A == 1)
- MFI.setLocalFrameMaxAlign(llvm::Align(8));
+ MFI.setLocalFrameMaxAlign(Align(8));
MFI.setUseLocalStackAllocationBlock(true);
// Set the physical aligned-stack base address register.
Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -1235,9 +1235,9 @@ HexagonTargetLowering::HexagonTargetLowe
Subtarget(ST) {
auto &HRI = *Subtarget.getRegisterInfo();
- setPrefLoopAlignment(llvm::Align(16));
- setMinFunctionAlignment(llvm::Align(4));
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefLoopAlignment(Align(16));
+ setMinFunctionAlignment(Align(4));
+ setPrefFunctionAlignment(Align(16));
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
setBooleanContents(TargetLoweringBase::UndefinedBooleanContent);
setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent);
Modified: llvm/trunk/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp Fri Sep 27 05:54:21 2019
@@ -116,8 +116,8 @@ void HexagonMCELFStreamer::HexagonMCEmit
}
// Update the maximum alignment of the section if necessary.
- if (llvm::Align(ByteAlignment) > Section.getAlignment())
- Section.setAlignment(llvm::Align(ByteAlignment));
+ if (Align(ByteAlignment) > Section.getAlignment())
+ Section.setAlignment(Align(ByteAlignment));
SwitchSection(P.first, P.second);
} else {
Modified: llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Lanai/LanaiISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -145,8 +145,8 @@ LanaiTargetLowering::LanaiTargetLowering
setTargetDAGCombine(ISD::XOR);
// Function alignments
- setMinFunctionAlignment(llvm::Align(4));
- setPrefFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(4));
+ setPrefFunctionAlignment(Align(4));
setJumpIsExpensive(true);
Modified: llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -327,8 +327,8 @@ MSP430TargetLowering::MSP430TargetLoweri
setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN);
// TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll
- setMinFunctionAlignment(llvm::Align(2));
- setPrefFunctionAlignment(llvm::Align(2));
+ setMinFunctionAlignment(Align(2));
+ setPrefFunctionAlignment(Align(2));
}
SDValue MSP430TargetLowering::LowerOperation(SDValue Op,
Modified: llvm/trunk/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/AsmParser/MipsAsmParser.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/AsmParser/MipsAsmParser.cpp (original)
+++ llvm/trunk/lib/Target/Mips/AsmParser/MipsAsmParser.cpp Fri Sep 27 05:54:21 2019
@@ -1805,9 +1805,8 @@ bool MipsAsmParser::processInstruction(M
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(
- Offset.getImm(),
- (inMicroMipsMode() ? llvm::Align(2) : llvm::Align(4))))
+ if (offsetToAlignment(Offset.getImm(),
+ (inMicroMipsMode() ? Align(2) : Align(4))))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BGEZ:
@@ -1836,9 +1835,8 @@ bool MipsAsmParser::processInstruction(M
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(
- Offset.getImm(),
- (inMicroMipsMode() ? llvm::Align(2) : llvm::Align(4))))
+ if (offsetToAlignment(Offset.getImm(),
+ (inMicroMipsMode() ? Align(2) : Align(4))))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BGEC: case Mips::BGEC_MMR6:
@@ -1853,7 +1851,7 @@ bool MipsAsmParser::processInstruction(M
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(4)))
+ if (offsetToAlignment(Offset.getImm(), Align(4)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BLEZC: case Mips::BLEZC_MMR6:
@@ -1866,7 +1864,7 @@ bool MipsAsmParser::processInstruction(M
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(18, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(4)))
+ if (offsetToAlignment(Offset.getImm(), Align(4)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BEQZC: case Mips::BEQZC_MMR6:
@@ -1877,7 +1875,7 @@ bool MipsAsmParser::processInstruction(M
break; // We'll deal with this situation later on when applying fixups.
if (!isIntN(23, Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(4)))
+ if (offsetToAlignment(Offset.getImm(), Align(4)))
return Error(IDLoc, "branch to misaligned address");
break;
case Mips::BEQZ16_MM:
@@ -1890,7 +1888,7 @@ bool MipsAsmParser::processInstruction(M
break; // We'll deal with this situation later on when applying fixups.
if (!isInt<8>(Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(2)))
+ if (offsetToAlignment(Offset.getImm(), Align(2)))
return Error(IDLoc, "branch to misaligned address");
break;
}
@@ -3495,7 +3493,7 @@ bool MipsAsmParser::expandUncondBranchMM
} else {
if (!isInt<17>(Offset.getImm()))
return Error(IDLoc, "branch target out of range");
- if (offsetToAlignment(Offset.getImm(), llvm::Align(2)))
+ if (offsetToAlignment(Offset.getImm(), Align(2)))
return Error(IDLoc, "branch to misaligned address");
Inst.clear();
Inst.setOpcode(Mips::BEQ_MM);
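The repeated check in the MipsAsmParser hunks uses offsetToAlignment(V, Align(N)), which returns the padding needed to round V up to a multiple of N; any nonzero result therefore means the branch target is misaligned. A small sketch of that idiom (the helper name is hypothetical, not from the patch):

    #include "llvm/Support/Alignment.h"
    #include <cstdint>

    using namespace llvm;

    // Mirrors the parser checks above: microMIPS branches must be 2-byte
    // aligned, standard MIPS branches 4-byte aligned.
    bool isMisalignedBranchTarget(int64_t Offset, bool InMicroMipsMode) {
      return offsetToAlignment(Offset, InMicroMipsMode ? Align(2) : Align(4)) != 0;
    }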
Modified: llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h (original)
+++ llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h Fri Sep 27 05:54:21 2019
@@ -15,7 +15,7 @@
namespace llvm {
// NaCl MIPS sandbox's instruction bundle size.
-static const llvm::Align MIPS_NACL_BUNDLE_ALIGN = llvm::Align(16);
+static const Align MIPS_NACL_BUNDLE_ALIGN = Align(16);
bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
bool *IsStore = nullptr);
Modified: llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp Fri Sep 27 05:54:21 2019
@@ -37,7 +37,7 @@ void MipsRegInfoRecord::EmitMipsOptionRe
Context.getELFSection(".MIPS.options", ELF::SHT_MIPS_OPTIONS,
ELF::SHF_ALLOC | ELF::SHF_MIPS_NOSTRIP, 1, "");
MCA.registerSection(*Sec);
- Sec->setAlignment(llvm::Align(8));
+ Sec->setAlignment(Align(8));
Streamer->SwitchSection(Sec);
Streamer->EmitIntValue(ELF::ODK_REGINFO, 1); // kind
@@ -55,7 +55,7 @@ void MipsRegInfoRecord::EmitMipsOptionRe
MCSectionELF *Sec = Context.getELFSection(".reginfo", ELF::SHT_MIPS_REGINFO,
ELF::SHF_ALLOC, 24, "");
MCA.registerSection(*Sec);
- Sec->setAlignment(MTS->getABI().IsN32() ? llvm::Align(8) : llvm::Align(4));
+ Sec->setAlignment(MTS->getABI().IsN32() ? Align(8) : Align(4));
Streamer->SwitchSection(Sec);
Streamer->EmitIntValue(ri_gprmask, 4);
Modified: llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp Fri Sep 27 05:54:21 2019
@@ -901,12 +901,9 @@ void MipsTargetELFStreamer::finish() {
MCSection &BSSSection = *OFI.getBSSSection();
MCA.registerSection(BSSSection);
- TextSection.setAlignment(
- llvm::Align(std::max(16u, TextSection.getAlignment())));
- DataSection.setAlignment(
- llvm::Align(std::max(16u, DataSection.getAlignment())));
- BSSSection.setAlignment(
- llvm::Align(std::max(16u, BSSSection.getAlignment())));
+ TextSection.setAlignment(Align(std::max(16u, TextSection.getAlignment())));
+ DataSection.setAlignment(Align(std::max(16u, DataSection.getAlignment())));
+ BSSSection.setAlignment(Align(std::max(16u, BSSSection.getAlignment())));
if (RoundSectionSizes) {
// Make sections sizes a multiple of the alignment. This is useful for
@@ -1029,7 +1026,7 @@ void MipsTargetELFStreamer::emitDirectiv
MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Context);
MCA.registerSection(*Sec);
- Sec->setAlignment(llvm::Align(4));
+ Sec->setAlignment(Align(4));
OS.PushSection();
@@ -1319,7 +1316,7 @@ void MipsTargetELFStreamer::emitMipsAbiF
MCSectionELF *Sec = Context.getELFSection(
".MIPS.abiflags", ELF::SHT_MIPS_ABIFLAGS, ELF::SHF_ALLOC, 24, "");
MCA.registerSection(*Sec);
- Sec->setAlignment(llvm::Align(8));
+ Sec->setAlignment(Align(8));
OS.SwitchSection(Sec);
OS << ABIFlagsSection;
Modified: llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsConstantIslandPass.cpp Fri Sep 27 05:54:21 2019
@@ -371,7 +371,7 @@ namespace {
void doInitialPlacement(std::vector<MachineInstr*> &CPEMIs);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
- llvm::Align getCPEAlign(const MachineInstr &CPEMI);
+ Align getCPEAlign(const MachineInstr &CPEMI);
void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
unsigned getOffsetOf(MachineInstr *MI) const;
unsigned getUserOffset(CPUser&) const;
@@ -529,11 +529,11 @@ MipsConstantIslands::doInitialPlacement(
MF->push_back(BB);
// MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
- const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());
+ const Align MaxAlign(MCP->getConstantPoolAlignment());
// Mark the basic block as required by the const-pool.
// If AlignConstantIslands isn't set, use 4-byte alignment for everything.
- BB->setAlignment(AlignConstantIslands ? MaxAlign : llvm::Align(4));
+ BB->setAlignment(AlignConstantIslands ? MaxAlign : Align(4));
// The function needs to be as aligned as the basic blocks. The linker may
// move functions around based on their alignment.
@@ -619,16 +619,16 @@ MipsConstantIslands::CPEntry
/// getCPEAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI. Alignment is measured in log2(bytes) units.
-llvm::Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
+Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
assert(CPEMI.getOpcode() == Mips::CONSTPOOL_ENTRY);
// Everything is 4-byte aligned unless AlignConstantIslands is set.
if (!AlignConstantIslands)
- return llvm::Align(4);
+ return Align(4);
unsigned CPI = CPEMI.getOperand(1).getIndex();
assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
- return llvm::Align(MCP->getConstants()[CPI].getAlignment());
+ return Align(MCP->getConstants()[CPI].getAlignment());
}
/// initializeFunctionInfo - Do the initial scan of the function, building up
@@ -936,11 +936,11 @@ bool MipsConstantIslands::isWaterInRange
unsigned &Growth) {
unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset();
unsigned NextBlockOffset;
- llvm::Align NextBlockAlignment;
+ Align NextBlockAlignment;
MachineFunction::const_iterator NextBlock = ++Water->getIterator();
if (NextBlock == MF->end()) {
NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
- NextBlockAlignment = llvm::Align::None();
+ NextBlockAlignment = Align::None();
} else {
NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
NextBlockAlignment = NextBlock->getAlignment();
@@ -1251,7 +1251,7 @@ void MipsConstantIslands::createNewWater
// Try to split the block so it's fully aligned. Compute the latest split
// point where we can add a 4-byte branch instruction, and then align to
// Align which is the largest possible alignment in the function.
- const llvm::Align Align = MF->getAlignment();
+ const Align Align = MF->getAlignment();
unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
BaseInsertOffset));
@@ -1423,7 +1423,7 @@ void MipsConstantIslands::removeDeadCPEM
BBInfo[CPEBB->getNumber()].Size = 0;
// This block no longer needs to be aligned.
- CPEBB->setAlignment(llvm::Align(1));
+ CPEBB->setAlignment(Align(1));
} else {
// Entries are sorted by descending alignment, so realign from the front.
CPEBB->setAlignment(getCPEAlign(*CPEBB->begin()));
@@ -1522,7 +1522,7 @@ MipsConstantIslands::fixupUnconditionalB
// We should have a way to back out this alignment restriction if we "can" later.
// but it is not harmful.
//
- DestBB->setAlignment(llvm::Align(4));
+ DestBB->setAlignment(Align(4));
Br.MaxDisp = ((1<<24)-1) * 2;
MI->setDesc(TII->get(Mips::JalB16));
}
Modified: llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -514,13 +514,12 @@ MipsTargetLowering::MipsTargetLowering(c
setLibcallName(RTLIB::SRA_I128, nullptr);
}
- setMinFunctionAlignment(Subtarget.isGP64bit() ? llvm::Align(8)
- : llvm::Align(4));
+ setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));
// The arguments on the stack are defined in terms of 4-byte slots on O32
// and 8-byte slots on N32/N64.
- setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? llvm::Align(8)
- : llvm::Align(4));
+ setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
+ : Align(4));
setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);
@@ -2148,7 +2147,7 @@ SDValue MipsTargetLowering::lowerVAARG(S
EVT VT = Node->getValueType(0);
SDValue Chain = Node->getOperand(0);
SDValue VAListPtr = Node->getOperand(1);
- const llvm::Align Align =
+ const Align Align =
llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDLoc DL(Node);
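The lowerVAARG hunk above uses MaybeAlign(N).valueOrOne(): MaybeAlign models a possibly-unspecified alignment, and valueOrOne() maps the unspecified (zero) case to Align(1). An illustrative reduction, with a made-up helper name:

    #include "llvm/Support/Alignment.h"
    #include <cstdint>

    using namespace llvm;

    // An alignment operand of 0 means "no alignment was specified"; fall
    // back to 1 byte so callers always receive a valid Align.
    Align alignmentFromOperand(uint64_t RawAlignment) {
      return MaybeAlign(RawAlignment).valueOrOne();
    }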
Modified: llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsSEISelDAGToDAG.cpp Fri Sep 27 05:54:21 2019
@@ -247,8 +247,8 @@ bool MipsSEDAGToDAGISel::selectAddrFrame
Base = Addr.getOperand(0);
// If base is a FI, additional offset calculation is done in
// eliminateFrameIndex, otherwise we need to check the alignment
- const llvm::Align Align(1ULL << ShiftAmount);
- if (!isAligned(Align, CN->getZExtValue()))
+ const Align Alignment(1ULL << ShiftAmount);
+ if (!isAligned(Alignment, CN->getZExtValue()))
return false;
}
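This hunk is also a rename (Align to Alignment) so the local no longer shares a name with the now-unqualified type; the check itself uses isAligned(A, V), which is true when V is a multiple of A. A minimal sketch under those assumptions, with hypothetical names:

    #include "llvm/Support/Alignment.h"
    #include <cstdint>

    using namespace llvm;

    // True when Offset is a multiple of (1 << ShiftAmount), as required by
    // the scaled-offset addressing check in selectAddrFrameIndexOffset.
    bool offsetIsAligned(uint64_t Offset, unsigned ShiftAmount) {
      const Align Alignment(1ULL << ShiftAmount);
      return isAligned(Alignment, Offset);
    }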
Modified: llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp Fri Sep 27 05:54:21 2019
@@ -212,7 +212,7 @@ void MipsSERegisterInfo::eliminateFI(Mac
// element size), otherwise it is a 16-bit signed immediate.
unsigned OffsetBitSize =
getLoadStoreOffsetSizeInBits(MI.getOpcode(), MI.getOperand(OpNo - 1));
- const llvm::Align OffsetAlign(getLoadStoreOffsetAlign(MI.getOpcode()));
+ const Align OffsetAlign(getLoadStoreOffsetAlign(MI.getOpcode()));
if (OffsetBitSize < 16 && isInt<16>(Offset) &&
(!isIntN(OffsetBitSize, Offset) || !isAligned(OffsetAlign, Offset))) {
// If we have an offset that needs to fit into a signed n-bit immediate
Modified: llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCAsmPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -1634,7 +1634,7 @@ bool PPCDarwinAsmPrinter::doFinalization
if (!Stubs.empty()) {
// Switch with ".non_lazy_symbol_pointer" directive.
OutStreamer->SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
- EmitAlignment(isPPC64 ? llvm::Align(8) : llvm::Align(4));
+ EmitAlignment(isPPC64 ? Align(8) : Align(4));
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
// L_foo$stub:
Modified: llvm/trunk/lib/Target/PowerPC/PPCBranchSelector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCBranchSelector.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCBranchSelector.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCBranchSelector.cpp Fri Sep 27 05:54:21 2019
@@ -81,20 +81,20 @@ FunctionPass *llvm::createPPCBranchSelec
/// original Offset.
unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB,
unsigned Offset) {
- const llvm::Align Align = MBB.getAlignment();
- if (Align == 1)
+ const Align Alignment = MBB.getAlignment();
+ if (Alignment == Align::None())
return 0;
- const llvm::Align ParentAlign = MBB.getParent()->getAlignment();
+ const Align ParentAlign = MBB.getParent()->getAlignment();
- if (Align <= ParentAlign)
- return offsetToAlignment(Offset, Align);
+ if (Alignment <= ParentAlign)
+ return offsetToAlignment(Offset, Alignment);
// The alignment of this MBB is larger than the function's alignment, so we
// can't tell whether or not it will insert nops. Assume that it will.
if (FirstImpreciseBlock < 0)
FirstImpreciseBlock = MBB.getNumber();
- return Align.value() + offsetToAlignment(Offset, Align);
+ return Alignment.value() + offsetToAlignment(Offset, Alignment);
}
/// We need to be careful about the offset of the first block in the function
@@ -178,7 +178,7 @@ int PPCBSel::computeBranchSize(MachineFu
const MachineBasicBlock *Dest,
unsigned BrOffset) {
int BranchSize;
- llvm::Align MaxAlign = llvm::Align(4);
+ Align MaxAlign = Align(4);
bool NeedExtraAdjustment = false;
if (Dest->getNumber() <= Src->getNumber()) {
// If this is a backwards branch, the delta is the offset from the
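In PPCBranchSelector.cpp above, the local previously named Align becomes Alignment so it no longer shares a name with the (now unqualified) Align type, and the old `== 1` test is spelled as a comparison against Align::None(), the 1-byte "no extra alignment" value. A hedged reduction of the resulting idiom, with hypothetical names:

    #include "llvm/Support/Alignment.h"
    #include <cstdint>

    using namespace llvm;

    // Bytes of padding a block may need in front of it, given its alignment.
    // Align::None() is Align(1), i.e. every offset is already aligned.
    uint64_t paddingBefore(uint64_t Offset, Align BlockAlignment) {
      if (BlockAlignment == Align::None())
        return 0;
      return offsetToAlignment(Offset, BlockAlignment);
    }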
Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -139,7 +139,7 @@ PPCTargetLowering::PPCTargetLowering(con
// On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
// arguments are at least 4/8 bytes aligned.
bool isPPC64 = Subtarget.isPPC64();
- setMinStackArgumentAlignment(isPPC64 ? llvm::Align(8) : llvm::Align(4));
+ setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
// Set up the register classes.
addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
@@ -1179,9 +1179,9 @@ PPCTargetLowering::PPCTargetLowering(con
setJumpIsExpensive();
}
- setMinFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(4));
if (Subtarget.isDarwin())
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefFunctionAlignment(Align(16));
switch (Subtarget.getDarwinDirective()) {
default: break;
@@ -1198,8 +1198,8 @@ PPCTargetLowering::PPCTargetLowering(con
case PPC::DIR_PWR7:
case PPC::DIR_PWR8:
case PPC::DIR_PWR9:
- setPrefLoopAlignment(llvm::Align(16));
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefLoopAlignment(Align(16));
+ setPrefFunctionAlignment(Align(16));
break;
}
@@ -14110,7 +14110,7 @@ void PPCTargetLowering::computeKnownBits
}
}
-llvm::Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
switch (Subtarget.getDarwinDirective()) {
default: break;
case PPC::DIR_970:
@@ -14131,7 +14131,7 @@ llvm::Align PPCTargetLowering::getPrefLo
// Actual alignment of the loop will depend on the hotness check and other
// logic in alignBlocks.
if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
- return llvm::Align(32);
+ return Align(32);
}
const PPCInstrInfo *TII = Subtarget.getInstrInfo();
@@ -14147,7 +14147,7 @@ llvm::Align PPCTargetLowering::getPrefLo
}
if (LoopSize > 16 && LoopSize <= 32)
- return llvm::Align(32);
+ return Align(32);
break;
}
Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.h Fri Sep 27 05:54:21 2019
@@ -742,7 +742,7 @@ namespace llvm {
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
- llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;
+ Align getPrefLoopAlignment(MachineLoop *ML) const override;
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
return true;
Modified: llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -198,7 +198,7 @@ RISCVTargetLowering::RISCVTargetLowering
setBooleanContents(ZeroOrOneBooleanContent);
// Function alignments.
- const llvm::Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
+ const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
setMinFunctionAlignment(FunctionAlignment);
setPrefFunctionAlignment(FunctionAlignment);
Modified: llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Sparc/SparcISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -1805,7 +1805,7 @@ SparcTargetLowering::SparcTargetLowering
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- setMinFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(4));
computeRegisterProperties(Subtarget->getRegisterInfo());
}
Modified: llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -120,9 +120,9 @@ SystemZTargetLowering::SystemZTargetLowe
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
// Instructions are strings of 2-byte aligned 2-byte values.
- setMinFunctionAlignment(llvm::Align(2));
+ setMinFunctionAlignment(Align(2));
// For performance reasons we prefer 16-byte alignment.
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefFunctionAlignment(Align(16));
// Handle operations that are handled in a similar way for all types.
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
Modified: llvm/trunk/lib/Target/SystemZ/SystemZLongBranch.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZLongBranch.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZLongBranch.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZLongBranch.cpp Fri Sep 27 05:54:21 2019
@@ -87,7 +87,7 @@ struct MBBInfo {
// The minimum alignment of the block.
// This value never changes.
- llvm::Align Alignment;
+ Align Alignment;
// The number of terminators in this block. This value never changes.
unsigned NumTerminators = 0;
Modified: llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86AsmPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -575,7 +575,7 @@ void X86AsmPrinter::EmitStartOfAsmFile(M
// Emitting note header.
int WordSize = TT.isArch64Bit() ? 8 : 4;
- EmitAlignment(WordSize == 4 ? llvm::Align(4) : llvm::Align(8));
+ EmitAlignment(WordSize == 4 ? Align(4) : Align(8));
OutStreamer->EmitIntValue(4, 4 /*size*/); // data size for "GNU\0"
OutStreamer->EmitIntValue(8 + WordSize, 4 /*size*/); // Elf_Prop size
OutStreamer->EmitIntValue(ELF::NT_GNU_PROPERTY_TYPE_0, 4 /*size*/);
@@ -585,7 +585,7 @@ void X86AsmPrinter::EmitStartOfAsmFile(M
OutStreamer->EmitIntValue(ELF::GNU_PROPERTY_X86_FEATURE_1_AND, 4);
OutStreamer->EmitIntValue(4, 4); // data size
OutStreamer->EmitIntValue(FeatureFlagsAnd, 4); // data
- EmitAlignment(WordSize == 4 ? llvm::Align(4) : llvm::Align(8)); // padding
+ EmitAlignment(WordSize == 4 ? Align(4) : Align(8)); // padding
OutStreamer->endSection(Nt);
OutStreamer->SwitchSection(Cur);
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -1951,13 +1951,13 @@ X86TargetLowering::X86TargetLowering(con
MaxLoadsPerMemcmpOptSize = 2;
// Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
- setPrefLoopAlignment(llvm::Align(1ULL << ExperimentalPrefLoopAlignment));
+ setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
// An out-of-order CPU can speculatively execute past a predictable branch,
// but a conditional move could be stalled by an expensive earlier operation.
PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
EnableExtLdPromotion = true;
- setPrefFunctionAlignment(llvm::Align(16));
+ setPrefFunctionAlignment(Align(16));
verifyIntrinsicTables();
}
Modified: llvm/trunk/lib/Target/X86/X86RetpolineThunks.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86RetpolineThunks.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86RetpolineThunks.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86RetpolineThunks.cpp Fri Sep 27 05:54:21 2019
@@ -279,7 +279,7 @@ void X86RetpolineThunks::populateThunk(M
CallTarget->addLiveIn(Reg);
CallTarget->setHasAddressTaken();
- CallTarget->setAlignment(llvm::Align(16));
+ CallTarget->setAlignment(Align(16));
insertRegReturnAddrClobber(*CallTarget, Reg);
CallTarget->back().setPreInstrSymbol(MF, TargetSym);
BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.cpp Fri Sep 27 05:54:21 2019
@@ -3294,7 +3294,7 @@ bool X86TTIImpl::isLegalMaskedStore(Type
return isLegalMaskedLoad(DataType);
}
-bool X86TTIImpl::isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
+bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
unsigned DataSize = DL.getTypeStoreSize(DataType);
// The only supported nontemporal loads are for aligned vectors of 16 or 32
// bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
@@ -3305,7 +3305,7 @@ bool X86TTIImpl::isLegalNTLoad(Type *Dat
return false;
}
-bool X86TTIImpl::isLegalNTStore(Type *DataType, llvm::Align Alignment) {
+bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
unsigned DataSize = DL.getTypeStoreSize(DataType);
// SSE4A supports nontemporal stores of float and double at arbitrary
Modified: llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h (original)
+++ llvm/trunk/lib/Target/X86/X86TargetTransformInfo.h Fri Sep 27 05:54:21 2019
@@ -187,8 +187,8 @@ public:
bool canMacroFuseCmp();
bool isLegalMaskedLoad(Type *DataType);
bool isLegalMaskedStore(Type *DataType);
- bool isLegalNTLoad(Type *DataType, llvm::Align Alignment);
- bool isLegalNTStore(Type *DataType, llvm::Align Alignment);
+ bool isLegalNTLoad(Type *DataType, Align Alignment);
+ bool isLegalNTStore(Type *DataType, Align Alignment);
bool isLegalMaskedGather(Type *DataType);
bool isLegalMaskedScatter(Type *DataType);
bool isLegalMaskedExpandLoad(Type *DataType);
Modified: llvm/trunk/lib/Target/XCore/XCoreAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/XCore/XCoreAsmPrinter.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/XCore/XCoreAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/XCore/XCoreAsmPrinter.cpp Fri Sep 27 05:54:21 2019
@@ -115,7 +115,7 @@ void XCoreAsmPrinter::EmitGlobalVariable
MCSymbol *GVSym = getSymbol(GV);
const Constant *C = GV->getInitializer();
- const llvm::Align Align(DL.getPrefTypeAlignment(C->getType()));
+ const Align Alignment(DL.getPrefTypeAlignment(C->getType()));
// Mark the start of the global
getTargetStreamer().emitCCTopData(GVSym->getName());
@@ -143,7 +143,7 @@ void XCoreAsmPrinter::EmitGlobalVariable
llvm_unreachable("Unknown linkage type!");
}
- EmitAlignment(std::max(Align, llvm::Align(4)), GV);
+ EmitAlignment(std::max(Alignment, Align(4)), GV);
if (GV->isThreadLocal()) {
report_fatal_error("TLS is not supported by this target!");
Modified: llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/XCore/XCoreISelLowering.cpp Fri Sep 27 05:54:21 2019
@@ -171,8 +171,8 @@ XCoreTargetLowering::XCoreTargetLowering
setTargetDAGCombine(ISD::INTRINSIC_VOID);
setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
- setMinFunctionAlignment(llvm::Align(2));
- setPrefFunctionAlignment(llvm::Align(4));
+ setMinFunctionAlignment(Align(2));
+ setPrefFunctionAlignment(Align(4));
}
bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
Modified: llvm/trunk/lib/Transforms/Utils/Local.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/Local.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/Local.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/Local.cpp Fri Sep 27 05:54:21 2019
@@ -1132,10 +1132,10 @@ bool llvm::EliminateDuplicatePHINodes(Ba
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
-static unsigned enforceKnownAlignment(Value *V, unsigned Align,
+static unsigned enforceKnownAlignment(Value *V, unsigned Alignment,
unsigned PrefAlign,
const DataLayout &DL) {
- assert(PrefAlign > Align);
+ assert(PrefAlign > Alignment);
V = V->stripPointerCasts();
@@ -1146,36 +1146,36 @@ static unsigned enforceKnownAlignment(Va
// stripPointerCasts recurses through infinite layers of bitcasts,
// while computeKnownBits is not allowed to traverse more than 6
// levels.
- Align = std::max(AI->getAlignment(), Align);
- if (PrefAlign <= Align)
- return Align;
+ Alignment = std::max(AI->getAlignment(), Alignment);
+ if (PrefAlign <= Alignment)
+ return Alignment;
// If the preferred alignment is greater than the natural stack alignment
// then don't round up. This avoids dynamic stack realignment.
- if (DL.exceedsNaturalStackAlignment(llvm::Align(PrefAlign)))
- return Align;
+ if (DL.exceedsNaturalStackAlignment(Align(PrefAlign)))
+ return Alignment;
AI->setAlignment(PrefAlign);
return PrefAlign;
}
if (auto *GO = dyn_cast<GlobalObject>(V)) {
// TODO: as above, this shouldn't be necessary.
- Align = std::max(GO->getAlignment(), Align);
- if (PrefAlign <= Align)
- return Align;
+ Alignment = std::max(GO->getAlignment(), Alignment);
+ if (PrefAlign <= Alignment)
+ return Alignment;
// If there is a large requested alignment and we can, bump up the alignment
// of the global. If the memory we set aside for the global may not be the
// memory used by the final program then it is impossible for us to reliably
// enforce the preferred alignment.
if (!GO->canIncreaseAlignment())
- return Align;
+ return Alignment;
GO->setAlignment(PrefAlign);
return PrefAlign;
}
- return Align;
+ return Alignment;
}
unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
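For context on the Local.cpp hunk: the only Align-typed call in that function is the natural-stack-alignment guard, which bails out rather than force dynamic stack realignment. A sketch of that clamp, assuming exceedsNaturalStackAlignment takes an Align at this revision (as the hunk shows); the helper name is invented:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/Support/Alignment.h"

    using namespace llvm;

    // Keep the existing alignment when rounding up to PrefAlign would
    // exceed the target's natural stack alignment.
    unsigned clampToStack(const DataLayout &DL, unsigned Alignment,
                          unsigned PrefAlign) {
      if (PrefAlign <= Alignment)
        return Alignment;
      if (DL.exceedsNaturalStackAlignment(Align(PrefAlign)))
        return Alignment;
      return PrefAlign;
    }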
Modified: llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp (original)
+++ llvm/trunk/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp Fri Sep 27 05:54:21 2019
@@ -742,7 +742,7 @@ bool LoopVectorizationLegality::canVecto
assert(VecTy && "did not find vectorized version of stored type");
unsigned Alignment = getLoadStoreAlignment(ST);
assert(Alignment && "Alignment should be set");
- if (!TTI->isLegalNTStore(VecTy, llvm::Align(Alignment))) {
+ if (!TTI->isLegalNTStore(VecTy, Align(Alignment))) {
reportVectorizationFailure(
"nontemporal store instruction cannot be vectorized",
"nontemporal store instruction cannot be vectorized",
@@ -759,7 +759,7 @@ bool LoopVectorizationLegality::canVecto
assert(VecTy && "did not find vectorized version of load type");
unsigned Alignment = getLoadStoreAlignment(LD);
assert(Alignment && "Alignment should be set");
- if (!TTI->isLegalNTLoad(VecTy, llvm::Align(Alignment))) {
+ if (!TTI->isLegalNTLoad(VecTy, Align(Alignment))) {
reportVectorizationFailure(
"nontemporal load instruction cannot be vectorized",
"nontemporal load instruction cannot be vectorized",
Modified: llvm/trunk/tools/dsymutil/DwarfStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/dsymutil/DwarfStreamer.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/tools/dsymutil/DwarfStreamer.cpp (original)
+++ llvm/trunk/tools/dsymutil/DwarfStreamer.cpp Fri Sep 27 05:54:21 2019
@@ -260,7 +260,7 @@ void DwarfStreamer::emitAppleTypes(
/// Emit the swift_ast section stored in \p Buffers.
void DwarfStreamer::emitSwiftAST(StringRef Buffer) {
MCSection *SwiftASTSection = MOFI->getDwarfSwiftASTSection();
- SwiftASTSection->setAlignment(llvm::Align(32));
+ SwiftASTSection->setAlignment(Align(32));
MS->SwitchSection(SwiftASTSection);
MS->EmitBytes(Buffer);
}
@@ -339,7 +339,7 @@ void DwarfStreamer::emitUnitRangesEntrie
sizeof(int8_t); // Segment Size (in bytes)
unsigned TupleSize = AddressSize * 2;
- unsigned Padding = offsetToAlignment(HeaderSize, llvm::Align(TupleSize));
+ unsigned Padding = offsetToAlignment(HeaderSize, Align(TupleSize));
Asm->EmitLabelDifference(EndLabel, BeginLabel, 4); // Arange length
Asm->OutStreamer->EmitLabel(BeginLabel);
Modified: llvm/trunk/tools/llvm-cov/TestingSupport.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-cov/TestingSupport.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/tools/llvm-cov/TestingSupport.cpp (original)
+++ llvm/trunk/tools/llvm-cov/TestingSupport.cpp Fri Sep 27 05:54:21 2019
@@ -100,7 +100,7 @@ int convertForTestingMain(int argc, cons
encodeULEB128(ProfileNamesAddress, OS);
OS << ProfileNamesData;
// Coverage mapping data is expected to have an alignment of 8.
- for (unsigned Pad = offsetToAlignment(OS.tell(), llvm::Align(8)); Pad; --Pad)
+ for (unsigned Pad = offsetToAlignment(OS.tell(), Align(8)); Pad; --Pad)
OS.write(uint8_t(0));
OS << CoverageMappingData;
Modified: llvm/trunk/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp?rev=373081&r1=373080&r2=373081&view=diff
==============================================================================
--- llvm/trunk/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp (original)
+++ llvm/trunk/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp Fri Sep 27 05:54:21 2019
@@ -146,7 +146,7 @@ uint64_t MachOLayoutBuilder::layoutSegme
Sec.Offset = 0;
} else {
uint64_t PaddingSize =
- offsetToAlignment(SegFileSize, llvm::Align(1ull << Sec.Align));
+ offsetToAlignment(SegFileSize, Align(1ull << Sec.Align));
Sec.Offset = SegOffset + SegFileSize + PaddingSize;
Sec.Size = Sec.Content.size();
SegFileSize += PaddingSize + Sec.Size;