[llvm] r363048 - [TargetLowering] Add allowsMemoryAccess(MachineMemOperand) helper wrapper. NFCI.
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 11 04:00:24 PDT 2019
Author: rksimon
Date: Tue Jun 11 04:00:23 2019
New Revision: 363048
URL: http://llvm.org/viewvc/llvm-project?rev=363048&view=rev
Log:
[TargetLowering] Add allowsMemoryAccess(MachineMemOperand) helper wrapper. NFCI.
As suggested by @arsenm on D63075, this adds a TargetLowering::allowsMemoryAccess wrapper that takes a load/store node's MachineMemOperand to supply the AddressSpace/Alignment arguments, and that will also implicitly pick up the MachineMemOperand::Flags change proposed in D63075.
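For illustration only (not part of the commit): a minimal sketch of how a call site can use the new MMO-based overload. The helper name is hypothetical; the allowsMemoryAccess signature is the one added by this patch, and the MachineMemOperand supplies the address space and alignment that callers previously extracted by hand.

    // Hypothetical helper (illustrative sketch only): returns true if a
    // memory access of type VT, described by LD's MachineMemOperand, is
    // both allowed and fast on the target.
    static bool isAccessAllowedAndFast(const TargetLowering &TLI,
                                       SelectionDAG &DAG, EVT VT,
                                       const LoadSDNode *LD) {
      bool Fast = false;
      bool Allowed = TLI.allowsMemoryAccess(
          *DAG.getContext(), DAG.getDataLayout(), VT,
          *LD->getMemOperand(), &Fast); // MMO carries AddrSpace + Alignment
      return Allowed && Fast;
    }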
Modified:
llvm/trunk/include/llvm/CodeGen/TargetLowering.h
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
Modified: llvm/trunk/include/llvm/CodeGen/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/TargetLowering.h?rev=363048&r1=363047&r2=363048&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetLowering.h (original)
+++ llvm/trunk/include/llvm/CodeGen/TargetLowering.h Tue Jun 11 04:00:23 2019
@@ -1430,6 +1430,14 @@ public:
unsigned AddrSpace = 0, unsigned Alignment = 1,
bool *Fast = nullptr) const;
+ /// Return true if the target supports a memory access of this type for the
+ /// given MachineMemOperand. If the access is allowed, the optional
+ /// final parameter returns whether the access is also fast (as defined by
+ /// the target).
+ bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
+ const MachineMemOperand &MMO,
+ bool *Fast = nullptr) const;
+
/// Returns the target specific optimal type for load and store operations as
/// a result of memset, memcpy, and memmove lowering.
///
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=363048&r1=363047&r2=363048&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Tue Jun 11 04:00:23 2019
@@ -6439,9 +6439,9 @@ SDValue DAGCombiner::MatchStoreCombine(S
// Check that a store of the wide type is both allowed and fast on the target
bool Fast = false;
- bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
- VT, FirstStore->getAddressSpace(),
- FirstStore->getAlignment(), &Fast);
+ bool Allowed =
+ TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+ *FirstStore->getMemOperand(), &Fast);
if (!Allowed || !Fast)
return SDValue();
@@ -6604,8 +6604,7 @@ SDValue DAGCombiner::MatchLoadCombine(SD
// Check that a load of the wide type is both allowed and fast on the target
bool Fast = false;
bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
- VT, FirstLoad->getAddressSpace(),
- FirstLoad->getAlignment(), &Fast);
+ VT, *FirstLoad->getMemOperand(), &Fast);
if (!Allowed || !Fast)
return SDValue();
@@ -10828,15 +10827,14 @@ SDValue DAGCombiner::visitBITCAST(SDNode
TLI.isOperationLegal(ISD::LOAD, VT)) &&
TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- unsigned OrigAlign = LN0->getAlignment();
bool Fast = false;
if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- LN0->getAddressSpace(), OrigAlign, &Fast) &&
+ *LN0->getMemOperand(), &Fast) &&
Fast) {
SDValue Load =
DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
- LN0->getPointerInfo(), OrigAlign,
+ LN0->getPointerInfo(), LN0->getAlignment(),
LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
return Load;
@@ -15439,8 +15437,8 @@ bool DAGCombiner::MergeConsecutiveStores
if (TLI.isTypeLegal(StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
- FirstStoreAlign, &IsFast) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy,
+ *FirstInChain->getMemOperand(), &IsFast) &&
IsFast) {
LastIntegerTrunc = false;
LastLegalType = i + 1;
@@ -15451,8 +15449,9 @@ bool DAGCombiner::MergeConsecutiveStores
TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
- FirstStoreAlign, &IsFast) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy,
+ *FirstInChain->getMemOperand(),
+ &IsFast) &&
IsFast) {
LastIntegerTrunc = true;
LastLegalType = i + 1;
@@ -15470,8 +15469,8 @@ bool DAGCombiner::MergeConsecutiveStores
EVT Ty = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
if (TLI.isTypeLegal(Ty) && TLI.isTypeLegal(MemVT) &&
TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
- TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
- FirstStoreAlign, &IsFast) &&
+ TLI.allowsMemoryAccess(
+ Context, DL, Ty, *FirstInChain->getMemOperand(), &IsFast) &&
IsFast)
LastLegalVectorType = i + 1;
}
@@ -15542,8 +15541,8 @@ bool DAGCombiner::MergeConsecutiveStores
if (TLI.isTypeLegal(Ty) &&
TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
- TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
- FirstStoreAlign, &IsFast) &&
+ TLI.allowsMemoryAccess(Context, DL, Ty,
+ *FirstInChain->getMemOperand(), &IsFast) &&
IsFast)
NumStoresToMerge = i + 1;
}
@@ -15634,7 +15633,6 @@ bool DAGCombiner::MergeConsecutiveStores
unsigned FirstStoreAS = FirstInChain->getAddressSpace();
unsigned FirstStoreAlign = FirstInChain->getAlignment();
LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
- unsigned FirstLoadAS = FirstLoad->getAddressSpace();
unsigned FirstLoadAlign = FirstLoad->getAlignment();
// Scan the memory operations on the chain and find the first
@@ -15674,11 +15672,11 @@ bool DAGCombiner::MergeConsecutiveStores
bool IsFastSt, IsFastLd;
if (TLI.isTypeLegal(StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
- FirstStoreAlign, &IsFastSt) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy,
+ *FirstInChain->getMemOperand(), &IsFastSt) &&
IsFastSt &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
- FirstLoadAlign, &IsFastLd) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy,
+ *FirstLoad->getMemOperand(), &IsFastLd) &&
IsFastLd) {
LastLegalVectorType = i + 1;
}
@@ -15688,11 +15686,11 @@ bool DAGCombiner::MergeConsecutiveStores
StoreTy = EVT::getIntegerVT(Context, SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
- FirstStoreAlign, &IsFastSt) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy,
+ *FirstInChain->getMemOperand(), &IsFastSt) &&
IsFastSt &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
- FirstLoadAlign, &IsFastLd) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy,
+ *FirstLoad->getMemOperand(), &IsFastLd) &&
IsFastLd) {
LastLegalIntegerType = i + 1;
DoIntegerTruncate = false;
@@ -15707,11 +15705,12 @@ bool DAGCombiner::MergeConsecutiveStores
TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValTy,
StoreTy) &&
TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValTy, StoreTy) &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
- FirstStoreAlign, &IsFastSt) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy,
+ *FirstInChain->getMemOperand(),
+ &IsFastSt) &&
IsFastSt &&
- TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
- FirstLoadAlign, &IsFastLd) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy,
+ *FirstLoad->getMemOperand(), &IsFastLd) &&
IsFastLd) {
LastLegalIntegerType = i + 1;
DoIntegerTruncate = true;
@@ -15962,13 +15961,12 @@ SDValue DAGCombiner::visitSTORE(SDNode *
if (((!LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegal(ISD::STORE, SVT)) &&
TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT)) {
- unsigned OrigAlign = ST->getAlignment();
bool Fast = false;
if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), SVT,
- ST->getAddressSpace(), OrigAlign, &Fast) &&
+ *ST->getMemOperand(), &Fast) &&
Fast) {
return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
- ST->getPointerInfo(), OrigAlign,
+ ST->getPointerInfo(), ST->getAlignment(),
ST->getMemOperand()->getFlags(), ST->getAAInfo());
}
}
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp?rev=363048&r1=363047&r2=363048&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Tue Jun 11 04:00:23 2019
@@ -492,10 +492,9 @@ void SelectionDAGLegalize::LegalizeStore
// If this is an unaligned store and the target doesn't support it,
// expand it.
EVT MemVT = ST->getMemoryVT();
- unsigned AS = ST->getAddressSpace();
- unsigned Align = ST->getAlignment();
const DataLayout &DL = DAG.getDataLayout();
- if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align)) {
+ if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
+ *ST->getMemOperand())) {
LLVM_DEBUG(dbgs() << "Expanding unsupported unaligned store\n");
SDValue Result = TLI.expandUnalignedStore(ST, DAG);
ReplaceNode(SDValue(ST, 0), Result);
@@ -607,11 +606,10 @@ void SelectionDAGLegalize::LegalizeStore
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal: {
EVT MemVT = ST->getMemoryVT();
- unsigned AS = ST->getAddressSpace();
- unsigned Align = ST->getAlignment();
// If this is an unaligned store and the target doesn't support it,
// expand it.
- if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align)) {
+ if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
+ *ST->getMemOperand())) {
SDValue Result = TLI.expandUnalignedStore(ST, DAG);
ReplaceNode(SDValue(ST, 0), Result);
}
@@ -668,13 +666,12 @@ void SelectionDAGLegalize::LegalizeLoadO
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal: {
EVT MemVT = LD->getMemoryVT();
- unsigned AS = LD->getAddressSpace();
- unsigned Align = LD->getAlignment();
const DataLayout &DL = DAG.getDataLayout();
// If this is an unaligned load and the target doesn't support it,
// expand it.
- if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align)) {
- std::tie(RVal, RChain) = TLI.expandUnalignedLoad(LD, DAG);
+ if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
+ *LD->getMemOperand())) {
+ std::tie(RVal, RChain) = TLI.expandUnalignedLoad(LD, DAG);
}
break;
}
@@ -860,10 +857,9 @@ void SelectionDAGLegalize::LegalizeLoadO
// If this is an unaligned load and the target doesn't support it,
// expand it.
EVT MemVT = LD->getMemoryVT();
- unsigned AS = LD->getAddressSpace();
- unsigned Align = LD->getAlignment();
const DataLayout &DL = DAG.getDataLayout();
- if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align)) {
+ if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
+ *LD->getMemOperand())) {
std::tie(Value, Chain) = TLI.expandUnalignedLoad(LD, DAG);
}
}
Modified: llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp?rev=363048&r1=363047&r2=363048&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp Tue Jun 11 04:00:23 2019
@@ -1482,6 +1482,14 @@ bool TargetLoweringBase::allowsMemoryAcc
return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}
+bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
+ const DataLayout &DL, EVT VT,
+ const MachineMemOperand &MMO,
+ bool *Fast) const {
+ return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
+ MMO.getAlignment(), Fast);
+}
+
BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
return BranchProbability(MinPercentageForPredictableBranch, 100);
}
Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=363048&r1=363047&r2=363048&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Tue Jun 11 04:00:23 2019
@@ -6769,14 +6769,15 @@ SDValue SITargetLowering::LowerLOAD(SDVa
assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
"Custom lowering for non-i32 vectors hasn't been implemented.");
- unsigned Alignment = Load->getAlignment();
- unsigned AS = Load->getAddressSpace();
if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
- AS, Alignment)) {
+ *Load->getMemOperand())) {
SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
return DAG.getMergeValues(Ops, DL);
}
+
+ unsigned Alignment = Load->getAlignment();
+ unsigned AS = Load->getAddressSpace();
if (Subtarget->hasLDSMisalignedBug() &&
AS == AMDGPUAS::FLAT_ADDRESS &&
Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
@@ -7237,12 +7238,12 @@ SDValue SITargetLowering::LowerSTORE(SDV
assert(VT.isVector() &&
Store->getValue().getValueType().getScalarType() == MVT::i32);
- unsigned AS = Store->getAddressSpace();
if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- AS, Store->getAlignment())) {
+ *Store->getMemOperand())) {
return expandUnalignedStore(Store, DAG);
}
+ unsigned AS = Store->getAddressSpace();
if (Subtarget->hasLDSMisalignedBug() &&
AS == AMDGPUAS::FLAT_ADDRESS &&
Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
Modified: llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp?rev=363048&r1=363047&r2=363048&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonISelLowering.cpp Tue Jun 11 04:00:23 2019
@@ -2620,7 +2620,6 @@ HexagonTargetLowering::LowerUnalignedLoa
const SDLoc &dl(Op);
const DataLayout &DL = DAG.getDataLayout();
LLVMContext &Ctx = *DAG.getContext();
- unsigned AS = LN->getAddressSpace();
// If the load aligning is disabled or the load can be broken up into two
// smaller legal loads, do the default (target-independent) expansion.
@@ -2630,15 +2629,15 @@ HexagonTargetLowering::LowerUnalignedLoa
DoDefault = true;
if (!AlignLoads) {
- if (allowsMemoryAccess(Ctx, DL, LN->getMemoryVT(), AS, HaveAlign))
+ if (allowsMemoryAccess(Ctx, DL, LN->getMemoryVT(), *LN->getMemOperand()))
return Op;
DoDefault = true;
}
- if (!DoDefault && 2*HaveAlign == NeedAlign) {
+ if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
// The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
- MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8*HaveAlign)
+ MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
: MVT::getVectorVT(MVT::i8, HaveAlign);
- DoDefault = allowsMemoryAccess(Ctx, DL, PartTy, AS, HaveAlign);
+ DoDefault = allowsMemoryAccess(Ctx, DL, PartTy, *LN->getMemOperand());
}
if (DoDefault) {
std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
Modified: llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp?rev=363048&r1=363047&r2=363048&view=diff
==============================================================================
--- llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/NVPTX/NVPTXISelLowering.cpp Tue Jun 11 04:00:23 2019
@@ -2231,7 +2231,7 @@ SDValue NVPTXTargetLowering::LowerLOAD(S
LoadSDNode *Load = cast<LoadSDNode>(Op);
EVT MemVT = Load->getMemoryVT();
if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
- Load->getAddressSpace(), Load->getAlignment())) {
+ *Load->getMemOperand())) {
SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
return DAG.getMergeValues(Ops, SDLoc(Op));
@@ -2274,7 +2274,7 @@ SDValue NVPTXTargetLowering::LowerSTORE(
// stores and have to handle it here.
if (VT == MVT::v2f16 &&
!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- Store->getAddressSpace(), Store->getAlignment()))
+ *Store->getMemOperand()))
return expandUnalignedStore(Store, DAG);
if (VT.isVector())
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=363048&r1=363047&r2=363048&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Jun 11 04:00:23 2019
@@ -38976,13 +38976,12 @@ static SDValue combineLoad(SDNode *N, Se
// pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
ISD::LoadExtType Ext = Ld->getExtensionType();
bool Fast;
- unsigned AddressSpace = Ld->getAddressSpace();
unsigned Alignment = Ld->getAlignment();
if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
Ext == ISD::NON_EXTLOAD &&
((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
(TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
- AddressSpace, Alignment, &Fast) && !Fast))) {
+ *Ld->getMemOperand(), &Fast) && !Fast))) {
unsigned NumElems = RegVT.getVectorNumElements();
if (NumElems < 2)
return SDValue();
@@ -39492,11 +39491,9 @@ static SDValue combineStore(SDNode *N, S
// If we are saving a concatenation of two XMM registers and 32-byte stores
// are slow, such as on Sandy Bridge, perform two 16-byte stores.
bool Fast;
- unsigned AddressSpace = St->getAddressSpace();
- unsigned Alignment = St->getAlignment();
if (VT.is256BitVector() && StVT == VT &&
TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- AddressSpace, Alignment, &Fast) &&
+ *St->getMemOperand(), &Fast) &&
!Fast) {
unsigned NumElems = VT.getVectorNumElements();
if (NumElems < 2)
@@ -42990,11 +42987,9 @@ static SDValue combineConcatVectorOps(co
// If needed, look through bitcasts to get to the load.
if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
bool Fast;
- unsigned Alignment = FirstLd->getAlignment();
- unsigned AS = FirstLd->getAddressSpace();
const X86TargetLowering *TLI = Subtarget.getTargetLowering();
- if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, AS,
- Alignment, &Fast) &&
+ if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+ *FirstLd->getMemOperand(), &Fast) &&
Fast) {
if (SDValue Ld =
EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))