[llvm] 74eac90 - [Alignment][NFC] MachineMemOperand::getAlign/getBaseAlign
Guillaume Chatelet via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 27 08:49:26 PDT 2020
Author: Guillaume Chatelet
Date: 2020-03-27T15:49:13Z
New Revision: 74eac9031afb021dbd2c7702b1e814fe80e3fe64
URL: https://github.com/llvm/llvm-project/commit/74eac9031afb021dbd2c7702b1e814fe80e3fe64
DIFF: https://github.com/llvm/llvm-project/commit/74eac9031afb021dbd2c7702b1e814fe80e3fe64.diff
LOG: [Alignment][NFC] MachineMemOperand::getAlign/getBaseAlign
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
Reviewers: courbet
Subscribers: arsenm, dschuff, sdardis, nemanjai, jvesely, nhaehnle, sbc100, jgravelle-google, hiraditya, aheejin, kbarton, jrtc27, atanasyan, jfb, kerbowa, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D76925
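
As a quick orientation for readers new to the series: the hunks below
replace the raw uint64_t alignment accessors with the Align type. The
following is a minimal standalone sketch of the target idioms; it is
illustrative only, not part of the commit, and assumes a checkout
contemporary with this patch that provides llvm/Support/Alignment.h:

  #include "llvm/Support/Alignment.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  int main() {
    // Align wraps a power-of-two byte alignment; constructing it from a
    // non-power-of-two value asserts, which is the type's main safety win.
    Align BaseAlign(16);

    // commonAlignment(A, Offset) replaces the old MinAlign(A, Offset)
    // idiom: the best alignment provable for BaseAlign plus a byte offset.
    Align AtOffset = commonAlignment(BaseAlign, /*Offset=*/4); // Align(4)

    // Callers that still need a raw integer call value(); comparisons are
    // done directly on Align, e.g. MMO->getAlign() >= Align(8).
    outs() << "base: " << BaseAlign.value()
           << ", at offset 4: " << AtOffset.value() << "\n";
    return 0;
  }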
Added:
Modified:
llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
llvm/include/llvm/CodeGen/MachineMemOperand.h
llvm/include/llvm/CodeGen/SelectionDAGNodes.h
llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
llvm/lib/CodeGen/MachineFunction.cpp
llvm/lib/CodeGen/MachineOperand.cpp
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/lib/CodeGen/TargetLoweringBase.cpp
llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
llvm/lib/Target/Mips/MipsInstructionSelector.cpp
llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
llvm/lib/Target/X86/X86InstrInfo.cpp
llvm/lib/Target/X86/X86InstructionSelector.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index eea39fc0fe62..9e9e1806bc6f 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -429,7 +429,7 @@ bool InstructionSelector::executeMatchTable(
dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
<< "(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
<< ")->getAlignment() >= " << MinAlign << ")\n");
- if (MMO->getAlignment() < MinAlign && handleReject() == RejectAndGiveUp)
+ if (MMO->getAlign() < MinAlign && handleReject() == RejectAndGiveUp)
return false;
break;
diff --git a/llvm/include/llvm/CodeGen/MachineMemOperand.h b/llvm/include/llvm/CodeGen/MachineMemOperand.h
index 76ae8dec668f..f8ff10053ceb 100644
--- a/llvm/include/llvm/CodeGen/MachineMemOperand.h
+++ b/llvm/include/llvm/CodeGen/MachineMemOperand.h
@@ -225,11 +225,25 @@ class MachineMemOperand {
/// Return the minimum known alignment in bytes of the actual memory
/// reference.
- uint64_t getAlignment() const;
+ /// FIXME: Remove once transition to Align is over.
+ LLVM_ATTRIBUTE_DEPRECATED(uint64_t getAlignment() const,
+ "Use getAlign instead");
+
+ /// Return the minimum known alignment in bytes of the actual memory
+ /// reference.
+ Align getAlign() const;
+
+ /// Return the minimum known alignment in bytes of the base address, without
+ /// the offset.
+ /// FIXME: Remove once transition to Align is over.
+ LLVM_ATTRIBUTE_DEPRECATED(uint64_t getBaseAlignment() const,
+ "Use getBaseAlign instead") {
+ return BaseAlign.value();
+ }
/// Return the minimum known alignment in bytes of the base address, without
/// the offset.
- uint64_t getBaseAlignment() const { return BaseAlign.value(); }
+ Align getBaseAlign() const { return BaseAlign; }
/// Return the AA tags for the memory reference.
AAMDNodes getAAInfo() const { return AAInfo; }
@@ -307,7 +321,7 @@ class MachineMemOperand {
LHS.getFlags() == RHS.getFlags() &&
LHS.getAAInfo() == RHS.getAAInfo() &&
LHS.getRanges() == RHS.getRanges() &&
- LHS.getAlignment() == RHS.getAlignment() &&
+ LHS.getAlign() == RHS.getAlign() &&
LHS.getAddrSpace() == RHS.getAddrSpace();
}
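A note on the deprecation mechanism used in the hunk above:
LLVM_ATTRIBUTE_DEPRECATED (from llvm/Support/Compiler.h) expands to the
declaration plus a compiler deprecation attribute, so existing callers
keep building during the transition but get a warning pointing at the
replacement. A minimal sketch of the pattern, illustrative only and not
part of the commit:

  #include "llvm/Support/Compiler.h"

  struct Example {
    unsigned Value = 8;

    // Old accessor, kept callable during the transition; any use emits a
    // -Wdeprecated-declarations warning carrying the message below.
    LLVM_ATTRIBUTE_DEPRECATED(unsigned getAlignment() const,
                              "Use getAlign instead");

    // Replacement accessor.
    unsigned getAlign() const { return Value; }
  };

  // Defined out of line; forwarding to the new accessor keeps the two
  // in sync until the deprecated one is finally removed.
  inline unsigned Example::getAlignment() const { return getAlign(); }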
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 17b66363bb07..1c145c5aabfc 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1292,12 +1292,8 @@ class MemSDNode : public SDNode {
bool writeMem() const { return MMO->isStore(); }
/// Returns alignment and volatility of the memory access
- unsigned getOriginalAlignment() const {
- return MMO->getBaseAlignment();
- }
- unsigned getAlignment() const {
- return MMO->getAlignment();
- }
+ unsigned getOriginalAlignment() const { return MMO->getBaseAlign().value(); }
+ unsigned getAlignment() const { return MMO->getAlign().value(); }
/// Return the SubclassData value, without HasDebugValue. This contains an
/// encoding of the volatile flag, as well as bits used by subclasses. This
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index e9e4e2b30698..763f433d3417 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1268,7 +1268,7 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
if (IsVolatile)
return false;
- Align DstAlign(MemOp->getBaseAlignment());
+ Align DstAlign = MemOp->getBaseAlign();
Align SrcAlign;
Register Dst = MI.getOperand(1).getReg();
Register Src = MI.getOperand(2).getReg();
@@ -1277,7 +1277,7 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
if (ID != Intrinsic::memset) {
assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
MemOp = *(++MMOIt);
- SrcAlign = Align(MemOp->getBaseAlignment());
+ SrcAlign = MemOp->getBaseAlign();
}
// See if this is a constant length copy
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
index 2658434fe949..5a8f8fd6a698 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -506,8 +506,7 @@ LegalizerInfo::getAction(const MachineInstr &MI,
SmallVector<LegalityQuery::MemDesc, 2> MemDescrs;
for (const auto &MMO : MI.memoperands())
MemDescrs.push_back({8 * MMO->getSize() /* in bits */,
- 8 * MMO->getAlignment(),
- MMO->getOrdering()});
+ 8 * MMO->getAlign().value(), MMO->getOrdering()});
return getAction({MI.getOpcode(), Types, MemDescrs});
}
diff --git a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
index 47e712aaccf3..004abc8a6ab0 100644
--- a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
+++ b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
@@ -112,7 +112,7 @@ std::string VRegRenamer::getInstructionOpcodeHash(MachineInstr &MI) {
MIOperands.push_back((unsigned)Op->getOrdering());
MIOperands.push_back((unsigned)Op->getAddrSpace());
MIOperands.push_back((unsigned)Op->getSyncScopeID());
- MIOperands.push_back((unsigned)Op->getBaseAlignment());
+ MIOperands.push_back((unsigned)Op->getBaseAlign().value());
MIOperands.push_back((unsigned)Op->getFailureOrdering());
}
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index 281d1bc15bfa..a8f653ee8791 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -486,14 +486,14 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
// If there is no pointer value, the offset isn't tracked so we need to adjust
// the base alignment.
- unsigned Align = PtrInfo.V.isNull()
- ? MinAlign(MMO->getBaseAlignment(), Offset)
- : MMO->getBaseAlignment();
+ Align Alignment = PtrInfo.V.isNull()
+ ? commonAlignment(MMO->getBaseAlign(), Offset)
+ : MMO->getBaseAlign();
- return new (Allocator)
- MachineMemOperand(PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size,
- Align, AAMDNodes(), nullptr, MMO->getSyncScopeID(),
- MMO->getOrdering(), MMO->getFailureOrdering());
+ return new (Allocator) MachineMemOperand(
+ PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size, Alignment.value(),
+ AAMDNodes(), nullptr, MMO->getSyncScopeID(), MMO->getOrdering(),
+ MMO->getFailureOrdering());
}
MachineMemOperand *
@@ -503,18 +503,17 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());
- return new (Allocator)
- MachineMemOperand(MPI, MMO->getFlags(), MMO->getSize(),
- MMO->getBaseAlignment(), AAInfo,
- MMO->getRanges(), MMO->getSyncScopeID(),
- MMO->getOrdering(), MMO->getFailureOrdering());
+ return new (Allocator) MachineMemOperand(
+ MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign().value(), AAInfo,
+ MMO->getRanges(), MMO->getSyncScopeID(), MMO->getOrdering(),
+ MMO->getFailureOrdering());
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
MachineMemOperand::Flags Flags) {
return new (Allocator) MachineMemOperand(
- MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlignment(),
+ MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign().value(),
MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
}
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
index 47d1c68d8c44..a2021319fb2e 100644
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1014,7 +1014,6 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
assert((PtrInfo.V.isNull() || PtrInfo.V.is<const PseudoSourceValue *>() ||
isa<PointerType>(PtrInfo.V.get<const Value *>()->getType())) &&
"invalid pointer value");
- assert(getBaseAlignment() == a && a != 0 && "Alignment is not a power of 2!");
assert((isLoad() || isStore()) && "Not a load/store!");
AtomicInfo.SSID = static_cast<unsigned>(SSID);
@@ -1032,7 +1031,7 @@ void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
ID.AddInteger(Size);
ID.AddPointer(getOpaqueValue());
ID.AddInteger(getFlags());
- ID.AddInteger(getBaseAlignment());
+ ID.AddInteger(getBaseAlign().value());
}
void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
@@ -1041,9 +1040,9 @@ void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
assert(MMO->getSize() == getSize() && "Size mismatch!");
- if (MMO->getBaseAlignment() >= getBaseAlignment()) {
+ if (MMO->getBaseAlign() >= getBaseAlign()) {
// Update the alignment value.
- BaseAlign = Align(MMO->getBaseAlignment());
+ BaseAlign = MMO->getBaseAlign();
// Also update the base and offset, because the new alignment may
// not be applicable with the old ones.
PtrInfo = MMO->PtrInfo;
@@ -1052,8 +1051,12 @@ void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
/// getAlignment - Return the minimum known alignment in bytes of the
/// actual memory reference.
-uint64_t MachineMemOperand::getAlignment() const {
- return MinAlign(getBaseAlignment(), getOffset());
+uint64_t MachineMemOperand::getAlignment() const { return getAlign().value(); }
+
+/// getAlign - Return the minimum known alignment in bytes of the
+/// actual memory reference.
+Align MachineMemOperand::getAlign() const {
+ return commonAlignment(getBaseAlign(), getOffset());
}
void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
@@ -1148,8 +1151,8 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
}
}
MachineOperand::printOperandOffset(OS, getOffset());
- if (getBaseAlignment() != getSize())
- OS << ", align " << getBaseAlignment();
+ if (getBaseAlign() != getSize())
+ OS << ", align " << getBaseAlign().value();
auto AAInfo = getAAInfo();
if (AAInfo.TBAA) {
OS << ", !tbaa ";
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 964daf85905f..463ddd5cdc1f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -21342,16 +21342,16 @@ bool DAGCombiner::isAlias(SDNode *Op0, SDNode *Op1) const {
// multiples of the size of the data.
int64_t SrcValOffset0 = MUC0.MMO->getOffset();
int64_t SrcValOffset1 = MUC1.MMO->getOffset();
- unsigned OrigAlignment0 = MUC0.MMO->getBaseAlignment();
- unsigned OrigAlignment1 = MUC1.MMO->getBaseAlignment();
+ Align OrigAlignment0 = MUC0.MMO->getBaseAlign();
+ Align OrigAlignment1 = MUC1.MMO->getBaseAlign();
auto &Size0 = MUC0.NumBytes;
auto &Size1 = MUC1.NumBytes;
if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
Size0.hasValue() && Size1.hasValue() && *Size0 == *Size1 &&
OrigAlignment0 > *Size0 && SrcValOffset0 % *Size0 == 0 &&
SrcValOffset1 % *Size1 == 0) {
- int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0;
- int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1;
+ int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0.value();
+ int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1.value();
// There is no overlap between these relatively aligned accesses of
// similar size. Return no alias.
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 27440b2dd02c..a84b9d17436a 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1557,7 +1557,7 @@ bool TargetLoweringBase::allowsMemoryAccessForAlignment(
LLVMContext &Context, const DataLayout &DL, EVT VT,
const MachineMemOperand &MMO, bool *Fast) const {
return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
- MMO.getAlignment(), MMO.getFlags(),
+ MMO.getAlign().value(), MMO.getFlags(),
Fast);
}
@@ -1573,7 +1573,7 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
const MachineMemOperand &MMO,
bool *Fast) const {
return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
- MMO.getAlignment(), MMO.getFlags(), Fast);
+ MMO.getAlign().value(), MMO.getFlags(), Fast);
}
BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 7f498e606a70..ba243ebdc645 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -444,15 +444,15 @@ static bool isScalarLoadLegal(const MachineInstr &MI) {
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
// There are no extending SMRD/SMEM loads, and they require 4-byte alignment.
- return MMO->getSize() >= 4 && MMO->getAlignment() >= 4 &&
- // Can't do a scalar atomic load.
- !MMO->isAtomic() &&
- // Don't use scalar loads for volatile accesses to non-constant address
- // spaces.
- (IsConst || !MMO->isVolatile()) &&
- // Memory must be known constant, or not written before this load.
- (IsConst || MMO->isInvariant() || memOpHasNoClobbered(MMO)) &&
- AMDGPUInstrInfo::isUniformMMO(MMO);
+ return MMO->getSize() >= 4 && MMO->getAlign() >= Align(4) &&
+ // Can't do a scalar atomic load.
+ !MMO->isAtomic() &&
+ // Don't use scalar loads for volatile accesses to non-constant address
+ // spaces.
+ (IsConst || !MMO->isVolatile()) &&
+ // Memory must be known constant, or not written before this load.
+ (IsConst || MMO->isInvariant() || memOpHasNoClobbered(MMO)) &&
+ AMDGPUInstrInfo::isUniformMMO(MMO);
}
RegisterBankInfo::InstructionMappings
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 2660c83e8abc..3af7dfff4653 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -3777,7 +3777,7 @@ unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
// If there are odd number of registers or if it's not 64-bit aligned,
// then it takes an extra AGU (Address Generation Unit) cycle.
if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
- (*MI.memoperands_begin())->getAlignment() < 8)
+ (*MI.memoperands_begin())->getAlign() < Align(8))
++UOps;
return UOps;
}
@@ -4364,10 +4364,10 @@ int ARMBaseInstrInfo::getOperandLatencyImpl(
return -1;
unsigned DefAlign = DefMI.hasOneMemOperand()
- ? (*DefMI.memoperands_begin())->getAlignment()
+ ? (*DefMI.memoperands_begin())->getAlign().value()
: 0;
unsigned UseAlign = UseMI.hasOneMemOperand()
- ? (*UseMI.memoperands_begin())->getAlignment()
+ ? (*UseMI.memoperands_begin())->getAlign().value()
: 0;
// Get the itinerary's latency if possible, and handle variable_ops.
@@ -4414,10 +4414,12 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
auto *DefMN = cast<MachineSDNode>(DefNode);
unsigned DefAlign = !DefMN->memoperands_empty()
- ? (*DefMN->memoperands_begin())->getAlignment() : 0;
+ ? (*DefMN->memoperands_begin())->getAlign().value()
+ : 0;
auto *UseMN = cast<MachineSDNode>(UseNode);
unsigned UseAlign = !UseMN->memoperands_empty()
- ? (*UseMN->memoperands_begin())->getAlignment() : 0;
+ ? (*UseMN->memoperands_begin())->getAlign().value()
+ : 0;
int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
UseMCID, UseIdx, UseAlign);
@@ -4708,7 +4710,7 @@ unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
// Adjust for dynamic def-side opcode variants not captured by the itinerary.
unsigned DefAlign =
- MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlignment() : 0;
+ MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlign().value() : 0;
int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
if (Adj >= 0 || (int)Latency > -Adj) {
return Latency + Adj;
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index a0e7a6f2b6ff..381425f30b36 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1608,7 +1608,7 @@ static bool isMemoryOp(const MachineInstr &MI) {
// Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
// not.
- if (MMO.getAlignment() < 4)
+ if (MMO.getAlign() < Align(4))
return false;
// str <undef> could probably be eliminated entirely, but for now we just want
@@ -2183,12 +2183,12 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
(*Op0->memoperands_begin())->isAtomic())
return false;
- unsigned Align = (*Op0->memoperands_begin())->getAlignment();
+ Align Alignment = (*Op0->memoperands_begin())->getAlign();
const Function &Func = MF->getFunction();
- unsigned ReqAlign = STI->hasV6Ops()
- ? TD->getABITypeAlignment(Type::getInt64Ty(Func.getContext()))
- : 8; // Pre-v6 need 8-byte align
- if (Align < ReqAlign)
+ Align ReqAlign =
+ STI->hasV6Ops() ? TD->getABITypeAlign(Type::getInt64Ty(Func.getContext()))
+ : Align(8); // Pre-v6 need 8-byte align
+ if (Alignment < ReqAlign)
return false;
// Then make sure the immediate offset fits.
diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index c5a62aa33990..c732f0305339 100644
--- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -457,7 +457,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
return false;
if (!MI->hasOneMemOperand() ||
- (*MI->memoperands_begin())->getAlignment() < 4)
+ (*MI->memoperands_begin())->getAlign() < Align(4))
return false;
// We're creating a completely different type of load/store - LDM from LDR.
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 17834ba24194..1b890ef412e9 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1027,10 +1027,9 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
auto UseAligned = [&] (const MachineInstr &MI, unsigned NeedAlign) {
if (MI.memoperands().empty())
return false;
- return all_of(MI.memoperands(),
- [NeedAlign] (const MachineMemOperand *MMO) {
- return NeedAlign <= MMO->getAlignment();
- });
+ return all_of(MI.memoperands(), [NeedAlign](const MachineMemOperand *MMO) {
+ return MMO->getAlign() >= NeedAlign;
+ });
};
switch (Opc) {
diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
index d80e0ed50c93..8467760cce01 100644
--- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -688,11 +688,12 @@ void HexagonSplitDoubleRegs::splitMemRef(MachineInstr *MI,
for (auto &MO : MI->memoperands()) {
const MachinePointerInfo &Ptr = MO->getPointerInfo();
MachineMemOperand::Flags F = MO->getFlags();
- int A = MO->getAlignment();
+ Align A = MO->getAlign();
- auto *Tmp1 = MF.getMachineMemOperand(Ptr, F, 4/*size*/, A);
+ auto *Tmp1 = MF.getMachineMemOperand(Ptr, F, 4 /*size*/, A.value());
LowI->addMemOperand(MF, Tmp1);
- auto *Tmp2 = MF.getMachineMemOperand(Ptr, F, 4/*size*/, std::min(A, 4));
+ auto *Tmp2 = MF.getMachineMemOperand(Ptr, F, 4 /*size*/,
+ std::min(A, Align(4)).value());
HighI->addMemOperand(MF, Tmp2);
}
}
diff --git a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
index aab37393ed36..ac584a2f48dd 100644
--- a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -314,7 +314,7 @@ bool HexagonStoreWidening::selectStores(InstrGroup::iterator Begin,
MachineInstr *FirstMI = *Begin;
assert(!FirstMI->memoperands_empty() && "Expecting some memory operands");
const MachineMemOperand &FirstMMO = getStoreTarget(FirstMI);
- unsigned Alignment = FirstMMO.getAlignment();
+ unsigned Alignment = FirstMMO.getAlign().value();
unsigned SizeAccum = FirstMMO.getSize();
unsigned FirstOffset = getStoreOffset(FirstMI);
@@ -416,10 +416,9 @@ bool HexagonStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG,
MachineInstr *FirstSt = OG.front();
DebugLoc DL = OG.back()->getDebugLoc();
const MachineMemOperand &OldM = getStoreTarget(FirstSt);
- MachineMemOperand *NewM =
- MF->getMachineMemOperand(OldM.getPointerInfo(), OldM.getFlags(),
- TotalSize, OldM.getAlignment(),
- OldM.getAAInfo());
+ MachineMemOperand *NewM = MF->getMachineMemOperand(
+ OldM.getPointerInfo(), OldM.getFlags(), TotalSize,
+ OldM.getAlign().value(), OldM.getAAInfo());
if (Acc < 0x10000) {
// Create mem[hw] = #Acc
diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
index 1a25f84abb72..445ef8541753 100644
--- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -462,7 +462,7 @@ bool MipsInstructionSelector::select(MachineInstr &I) {
}
// Unaligned memory access
- if (MMO->getSize() > MMO->getAlignment() &&
+ if (MMO->getAlign() < MMO->getSize() &&
!STI.systemSupportsUnalignedAccess()) {
if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
return false;
diff --git a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
index f288e734d9ff..310e54b0ea8d 100644
--- a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -52,7 +52,7 @@ bool MipsPreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer,
static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
if (!isPowerOf2_64(MMO->getSize()))
return false;
- bool isUnaligned = MMO->getSize() > MMO->getAlignment();
+ bool isUnaligned = MMO->getAlign() < MMO->getSize();
if (!STI.systemSupportsUnalignedAccess() && isUnaligned)
return false;
diff --git a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
index 404612621a72..6325e513f9f8 100644
--- a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -157,7 +157,7 @@ static bool isGprbTwoInstrUnalignedLoadOrStore(const MachineInstr *MI) {
const MipsSubtarget &STI =
static_cast<const MipsSubtarget &>(MI->getMF()->getSubtarget());
if (MMO->getSize() == 4 && (!STI.systemSupportsUnalignedAccess() &&
- MMO->getSize() > MMO->getAlignment()))
+ MMO->getAlign() < MMO->getSize()))
return true;
}
return false;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 6f5c4efff20f..81540ca4176f 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -13650,8 +13650,8 @@ SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
// Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
// aligned and the type is a vector with elements up to 4 bytes
- if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
- && VecTy.getScalarSizeInBits() <= 32 ) {
+ if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
+ VecTy.getScalarSizeInBits() <= 32) {
return SDValue();
}
@@ -13721,8 +13721,8 @@ SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
// Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
// aligned and the type is a vector with elements up to 4 bytes
- if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
- && VecTy.getScalarSizeInBits() <= 32 ) {
+ if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
+ VecTy.getScalarSizeInBits() <= 32) {
return SDValue();
}
diff --git a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index 3c08fade3b4c..4109bfc11337 100644
--- a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -92,9 +92,9 @@ static void lowerAlignmentHint(const MachineInstr *MI, MCInst &LoweredMI,
return;
const MachineMemOperand *MMO = *MI->memoperands_begin();
unsigned AlignmentHint = 0;
- if (MMO->getAlignment() >= 16)
+ if (MMO->getAlign() >= Align(16))
AlignmentHint = 4;
- else if (MMO->getAlignment() >= 8)
+ else if (MMO->getAlign() >= Align(8))
AlignmentHint = 3;
if (AlignmentHint == 0)
return;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
index a249ccf17638..89ae45722e42 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
@@ -65,7 +65,7 @@ static void rewriteP2Align(MachineInstr &MI, unsigned OperandNo) {
assert(MI.getDesc().OpInfo[OperandNo].OperandType ==
WebAssembly::OPERAND_P2ALIGN &&
"Load and store instructions should have a p2align operand");
- uint64_t P2Align = Log2_64((*MI.memoperands_begin())->getAlignment());
+ uint64_t P2Align = Log2((*MI.memoperands_begin())->getAlign());
// WebAssembly does not currently support supernatural alignment.
P2Align = std::min(P2Align,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 3bfad3d190f5..0371c50dfe27 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -5811,7 +5811,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
// Determine the alignment of the load.
Align Alignment;
if (LoadMI.hasOneMemOperand())
- Alignment = Align((*LoadMI.memoperands_begin())->getAlignment());
+ Alignment = (*LoadMI.memoperands_begin())->getAlign();
else
switch (LoadMI.getOpcode()) {
case X86::AVX512_512_SET0:
@@ -6092,7 +6092,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
Opc = getBroadcastOpcode(I, RC, Subtarget);
} else {
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
- bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget);
}
@@ -6169,7 +6169,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
- bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
@@ -6236,7 +6236,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
Opc = getBroadcastOpcode(I, RC, Subtarget);
} else {
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
- bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget);
}
@@ -6302,7 +6302,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
// FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
// memory access is slow above.
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
- bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
+ bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
SDNode *Store =
DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
dl, MVT::Other, AddrOps);
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
index dcacb650dc44..8916f3521388 100644
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -71,7 +71,7 @@ class X86InstructionSelector : public InstructionSelector {
// TODO: remove after supported by Tablegen-erated instruction selection.
unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
- uint64_t Alignment) const;
+ Align Alignment) const;
bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
@@ -394,7 +394,7 @@ bool X86InstructionSelector::select(MachineInstr &I) {
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
const RegisterBank &RB,
unsigned Opc,
- uint64_t Alignment) const {
+ Align Alignment) const {
bool Isload = (Opc == TargetOpcode::G_LOAD);
bool HasAVX = STI.hasAVX();
bool HasAVX512 = STI.hasAVX512();
@@ -427,7 +427,7 @@ unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
HasAVX ? X86::VMOVSDmr :
X86::MOVSDmr);
} else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
- if (Alignment >= 16)
+ if (Alignment >= Align(16))
return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
: HasAVX512
? X86::VMOVAPSZ128rm_NOVLX
@@ -446,7 +446,7 @@ unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
? X86::VMOVUPSZ128mr_NOVLX
: HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
} else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
- if (Alignment >= 32)
+ if (Alignment >= Align(32))
return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
: HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
: X86::VMOVAPSYrm)
@@ -461,7 +461,7 @@ unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
: HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
: X86::VMOVUPSYmr);
} else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
- if (Alignment >= 64)
+ if (Alignment >= Align(64))
return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
else
return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
@@ -520,13 +520,13 @@ bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
return false;
}
- if (MemOp.getAlignment() < Ty.getSizeInBits()/8) {
+ if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
return false;
}
}
- unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
+ unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
if (NewOpc == Opc)
return false;
@@ -1435,14 +1435,16 @@ bool X86InstructionSelector::materializeFP(MachineInstr &I,
const Register DstReg = I.getOperand(0).getReg();
const LLT DstTy = MRI.getType(DstReg);
const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
- unsigned Align = DstTy.getSizeInBytes();
+ Align Alignment = Align(DstTy.getSizeInBytes());
const DebugLoc &DbgLoc = I.getDebugLoc();
- unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);
+ unsigned Opc =
+ getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);
// Create the load from the constant pool.
const ConstantFP *CFP = I.getOperand(1).getFPImm();
- unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
+ unsigned CPI =
+ MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment.value());
MachineInstr *LoadInst = nullptr;
unsigned char OpFlag = STI.classifyLocalReference(nullptr);
@@ -1456,7 +1458,7 @@ bool X86InstructionSelector::materializeFP(MachineInstr &I,
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
- MF.getDataLayout().getPointerSize(), Align);
+ MF.getDataLayout().getPointerSize(), Alignment.value());
LoadInst =
addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),