[llvm] 998118c - [Alignment][NFC] Deprecate the MachineFunction::getMachineMemOperand overload that takes an untyped alignment.
Guillaume Chatelet via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 31 09:05:45 PDT 2020
Author: Guillaume Chatelet
Date: 2020-03-31T16:05:31Z
New Revision: 998118c3d3bd6a394c1da35f7570cce1a3145ea3
URL: https://github.com/llvm/llvm-project/commit/998118c3d3bd6a394c1da35f7570cce1a3145ea3
DIFF: https://github.com/llvm/llvm-project/commit/998118c3d3bd6a394c1da35f7570cce1a3145ea3.diff
LOG: [Alignment][NFC] Deprecate the MachineFunction::getMachineMemOperand overload that takes an untyped alignment.
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
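For callers, the migration is mechanical: wrap the raw integer in Align (or MaybeAlign when zero means "unknown") at the call site. A minimal sketch, assuming MF, PtrInfo and Size are already in scope (the variable names are illustrative, not taken from this patch):

    // Before: raw unsigned alignment, now flagged by LLVM_ATTRIBUTE_DEPRECATED.
    MachineMemOperand *OldMMO = MF.getMachineMemOperand(
        PtrInfo, MachineMemOperand::MOLoad, Size, /*base_alignment=*/8);

    // After: typed alignment; Align(8) asserts the value is a power of two.
    MachineMemOperand *NewMMO = MF.getMachineMemOperand(
        PtrInfo, MachineMemOperand::MOLoad, Size, Align(8));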
Reviewers: courbet
Subscribers: hiraditya, jfb, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D77138
Added:
Modified:
llvm/include/llvm/CodeGen/MachineFunction.h
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
llvm/lib/Target/ARM/ARMInstructionSelector.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
index 6a2045b47066..a1626fe5529a 100644
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -819,12 +819,15 @@ class MachineFunction {
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
/// FIXME: Remove once transition to Align is over.
- inline MachineMemOperand *getMachineMemOperand(
- MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
- unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
- const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
- AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
- AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic) {
+ LLVM_ATTRIBUTE_DEPRECATED(
+ inline MachineMemOperand *getMachineMemOperand(
+ MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
+ unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
+ const MDNode *Ranges = nullptr,
+ SyncScope::ID SSID = SyncScope::System,
+ AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+ AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic),
+ "Use the version that takes Align instead") {
return getMachineMemOperand(PtrInfo, f, s, Align(base_alignment), AAInfo,
Ranges, SSID, Ordering, FailureOrdering);
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 3bfc8545e544..98bbaefbb584 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6679,19 +6679,17 @@ SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
SDValue SelectionDAG::getMemIntrinsicNode(
unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
- EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
+ EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment,
MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
- if (Align == 0) // Ensure that codegen never sees alignment 0
- Align = getEVTAlignment(MemVT);
-
if (!Size && MemVT.isScalableVector())
Size = MemoryLocation::UnknownSize;
else if (!Size)
Size = MemVT.getStoreSize();
MachineFunction &MF = getMachineFunction();
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo);
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ PtrInfo, Flags, Size, Alignment ? Align(Alignment) : getEVTAlign(MemVT),
+ AAInfo);
return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 042907d34907..26ed6e22af94 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4173,25 +4173,26 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
bool IsCompressing) {
SDLoc sdl = getCurSDLoc();
- auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
- unsigned& Alignment) {
+ auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
+ MaybeAlign &Alignment) {
// llvm.masked.store.*(Src0, Ptr, alignment, Mask)
Src0 = I.getArgOperand(0);
Ptr = I.getArgOperand(1);
- Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
+ Alignment =
+ MaybeAlign(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
Mask = I.getArgOperand(3);
};
- auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
- unsigned& Alignment) {
+ auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
+ MaybeAlign &Alignment) {
// llvm.masked.compressstore.*(Src0, Ptr, Mask)
Src0 = I.getArgOperand(0);
Ptr = I.getArgOperand(1);
Mask = I.getArgOperand(2);
- Alignment = 0;
+ Alignment = None;
};
Value *PtrOperand, *MaskOperand, *Src0Operand;
- unsigned Alignment;
+ MaybeAlign Alignment;
if (IsCompressing)
getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
else
@@ -4204,19 +4205,16 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
EVT VT = Src0.getValueType();
if (!Alignment)
- Alignment = DAG.getEVTAlignment(VT);
+ Alignment = DAG.getEVTAlign(VT);
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
- MachineMemOperand *MMO =
- DAG.getMachineFunction().
- getMachineMemOperand(MachinePointerInfo(PtrOperand),
- MachineMemOperand::MOStore,
- // TODO: Make MachineMemOperands aware of scalable
- // vectors.
- VT.getStoreSize().getKnownMinSize(),
- Alignment, AAInfo);
+ MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
+ MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
+ // TODO: Make MachineMemOperands aware of scalable
+ // vectors.
+ VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
SDValue StoreNode =
DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
ISD::UNINDEXED, false /* Truncating */, IsCompressing);
@@ -4316,9 +4314,9 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
SDValue Src0 = getValue(I.getArgOperand(0));
SDValue Mask = getValue(I.getArgOperand(3));
EVT VT = Src0.getValueType();
- unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
+ MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
if (!Alignment)
- Alignment = DAG.getEVTAlignment(VT);
+ Alignment = DAG.getEVTAlign(VT);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
AAMDNodes AAInfo;
@@ -4331,13 +4329,11 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this);
unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
- MachineMemOperand *MMO = DAG.getMachineFunction().
- getMachineMemOperand(MachinePointerInfo(AS),
- MachineMemOperand::MOStore,
- // TODO: Make MachineMemOperands aware of scalable
- // vectors.
- MemoryLocation::UnknownSize,
- Alignment, AAInfo);
+ MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
+ MachinePointerInfo(AS), MachineMemOperand::MOStore,
+ // TODO: Make MachineMemOperands aware of scalable
+ // vectors.
+ MemoryLocation::UnknownSize, *Alignment, AAInfo);
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
@@ -4354,25 +4350,26 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
SDLoc sdl = getCurSDLoc();
- auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
- unsigned& Alignment) {
+ auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
+ MaybeAlign &Alignment) {
// @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
Ptr = I.getArgOperand(0);
- Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+ Alignment =
+ MaybeAlign(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
Mask = I.getArgOperand(2);
Src0 = I.getArgOperand(3);
};
- auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
- unsigned& Alignment) {
+ auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
+ MaybeAlign &Alignment) {
// @llvm.masked.expandload.*(Ptr, Mask, Src0)
Ptr = I.getArgOperand(0);
- Alignment = 0;
+ Alignment = None;
Mask = I.getArgOperand(1);
Src0 = I.getArgOperand(2);
};
Value *PtrOperand, *MaskOperand, *Src0Operand;
- unsigned Alignment;
+ MaybeAlign Alignment;
if (IsExpanding)
getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
else
@@ -4385,7 +4382,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
EVT VT = Src0.getValueType();
if (!Alignment)
- Alignment = DAG.getEVTAlignment(VT);
+ Alignment = DAG.getEVTAlign(VT);
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
@@ -4403,14 +4400,11 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
- MachineMemOperand *MMO =
- DAG.getMachineFunction().
- getMachineMemOperand(MachinePointerInfo(PtrOperand),
- MachineMemOperand::MOLoad,
- // TODO: Make MachineMemOperands aware of scalable
- // vectors.
- VT.getStoreSize().getKnownMinSize(),
- Alignment, AAInfo, Ranges);
+ MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
+ MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
+ // TODO: Make MachineMemOperands aware of scalable
+ // vectors.
+ VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges);
SDValue Load =
DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
@@ -4430,9 +4424,9 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
- unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
+ MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
if (!Alignment)
- Alignment = DAG.getEVTAlignment(VT);
+ Alignment = DAG.getEVTAlign(VT);
AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
@@ -4445,14 +4439,11 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
SDValue Scale;
bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this);
unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
- MachineMemOperand *MMO =
- DAG.getMachineFunction().
- getMachineMemOperand(MachinePointerInfo(AS),
- MachineMemOperand::MOLoad,
- // TODO: Make MachineMemOperands aware of scalable
- // vectors.
- MemoryLocation::UnknownSize,
- Alignment, AAInfo, Ranges);
+ MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
+ MachinePointerInfo(AS), MachineMemOperand::MOLoad,
+ // TODO: Make MachineMemOperands aware of scalable
+ // vectors.
+ MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
@@ -4479,16 +4470,14 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
- auto Alignment = DAG.getEVTAlignment(MemVT);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
MachineFunction &MF = DAG.getMachineFunction();
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
- Flags, MemVT.getStoreSize(), Alignment,
- AAMDNodes(), nullptr, SSID, SuccessOrdering,
- FailureOrdering);
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
+ DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
+ FailureOrdering);
SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
dl, MemVT, VTs, InChain,
@@ -4527,15 +4516,13 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
SDValue InChain = getRoot();
auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
- auto Alignment = DAG.getEVTAlignment(MemVT);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
MachineFunction &MF = DAG.getMachineFunction();
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
- MemVT.getStoreSize(), Alignment, AAMDNodes(),
- nullptr, SSID, Ordering);
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
+ DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
SDValue L =
DAG.getAtomic(NT, dl, MemVT, InChain,
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 881335272811..10a540f8bfa6 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2176,32 +2176,33 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
// Rationale: This sequence saves uop updates compared to a sequence of
// pre-increment spills like stp xi,xj,[sp,#-16]!
// Note: Similar rationale and sequence for restores in epilog.
- unsigned Size, Align;
+ unsigned Size;
+ Align Alignment;
switch (RPI.Type) {
case RegPairInfo::GPR:
StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
Size = 8;
- Align = 8;
+ Alignment = Align(8);
break;
case RegPairInfo::FPR64:
StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
Size = 8;
- Align = 8;
+ Alignment = Align(8);
break;
case RegPairInfo::FPR128:
StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
Size = 16;
- Align = 16;
+ Alignment = Align(16);
break;
case RegPairInfo::ZPR:
StrOpc = AArch64::STR_ZXI;
Size = 16;
- Align = 16;
+ Alignment = Align(16);
break;
case RegPairInfo::PPR:
StrOpc = AArch64::STR_PXI;
Size = 2;
- Align = 2;
+ Alignment = Align(2);
break;
}
LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
@@ -2230,7 +2231,7 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
MIB.addMemOperand(MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
- MachineMemOperand::MOStore, Size, Align));
+ MachineMemOperand::MOStore, Size, Alignment));
}
MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
.addReg(AArch64::SP)
@@ -2238,8 +2239,8 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
// where factor*scale is implicit
.setMIFlag(MachineInstr::FrameSetup);
MIB.addMemOperand(MF.getMachineMemOperand(
- MachinePointerInfo::getFixedStack(MF,FrameIdxReg1),
- MachineMemOperand::MOStore, Size, Align));
+ MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
+ MachineMemOperand::MOStore, Size, Alignment));
if (NeedsWinCFI)
InsertSEH(MIB, TII, MachineInstr::FrameSetup);
@@ -2281,32 +2282,33 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
// ldp x22, x21, [sp, #0] // addImm(+0)
// Note: see comment in spillCalleeSavedRegisters()
unsigned LdrOpc;
- unsigned Size, Align;
+ unsigned Size;
+ Align Alignment;
switch (RPI.Type) {
case RegPairInfo::GPR:
LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
Size = 8;
- Align = 8;
+ Alignment = Align(8);
break;
case RegPairInfo::FPR64:
LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
Size = 8;
- Align = 8;
+ Alignment = Align(8);
break;
case RegPairInfo::FPR128:
LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
Size = 16;
- Align = 16;
+ Alignment = Align(16);
break;
case RegPairInfo::ZPR:
LdrOpc = AArch64::LDR_ZXI;
Size = 16;
- Align = 16;
+ Alignment = Align(16);
break;
case RegPairInfo::PPR:
LdrOpc = AArch64::LDR_PXI;
Size = 2;
- Align = 2;
+ Alignment = Align(2);
break;
}
LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
@@ -2329,7 +2331,7 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
MIB.addReg(Reg2, getDefRegState(true));
MIB.addMemOperand(MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
- MachineMemOperand::MOLoad, Size, Align));
+ MachineMemOperand::MOLoad, Size, Alignment));
}
MIB.addReg(Reg1, getDefRegState(true))
.addReg(AArch64::SP)
@@ -2338,7 +2340,7 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
.setMIFlag(MachineInstr::FrameDestroy);
MIB.addMemOperand(MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
- MachineMemOperand::MOLoad, Size, Align));
+ MachineMemOperand::MOLoad, Size, Alignment));
if (NeedsWinCFI)
InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
};
diff --git a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
index 39b16a4065c5..e8d49cbbea32 100644
--- a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -627,7 +627,7 @@ bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
bool UseMovt = STI.useMovt();
unsigned Size = TM.getPointerSize(0);
- unsigned Alignment = 4;
+ const Align Alignment(4);
auto addOpsForConstantPoolLoad = [&MF, Alignment,
Size](MachineInstrBuilder &MIB,
@@ -639,10 +639,10 @@ bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
auto CPIndex =
// For SB relative entries we need a target-specific constant pool.
// Otherwise, just use a regular constant pool entry.
- IsSBREL
- ? ConstPool->getConstantPoolIndex(
- ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
- : ConstPool->getConstantPoolIndex(GV, Alignment);
+ IsSBREL ? ConstPool->getConstantPoolIndex(
+ ARMConstantPoolConstant::Create(GV, ARMCP::SBREL),
+ Alignment.value())
+ : ConstPool->getConstantPoolIndex(GV, Alignment.value());
MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
.addMemOperand(MF.getMachineMemOperand(
MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
@@ -990,14 +990,14 @@ bool ARMInstructionSelector::select(MachineInstr &I) {
case G_FCONSTANT: {
// Load from constant pool
unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits() / 8;
- unsigned Alignment = Size;
+ Align Alignment(Size);
assert((Size == 4 || Size == 8) && "Unsupported FP constant type");
auto LoadOpcode = Size == 4 ? ARM::VLDRS : ARM::VLDRD;
auto ConstPool = MF.getConstantPool();
- auto CPIndex =
- ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(), Alignment);
+ auto CPIndex = ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(),
+ Alignment.value());
MIB->setDesc(TII.get(LoadOpcode));
MIB->RemoveOperand(1);
MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
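The SelectionDAGBuilder changes above all follow the same pattern: an alignment of zero coming from the intrinsic operand is modeled as an empty MaybeAlign, defaulted to the type's natural alignment, and only then dereferenced. A minimal standalone sketch of that pattern (the helper name is illustrative, not part of the patch):

    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // Zero means "no alignment specified": MaybeAlign(0) is empty, so the
    // fallback alignment is substituted before the value is dereferenced.
    static Align resolveAlignment(uint64_t RawAlignment, Align Fallback) {
      MaybeAlign A(RawAlignment);
      if (!A)
        A = Fallback;
      return *A;
    }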