[PATCH] D124218: [LoadStoreVectorizer] Consider if operation is faster than before
Stanislav Mekhanoshin via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 21 17:17:14 PDT 2022
rampitec created this revision.
rampitec added reviewers: arsenm, foad.
Herald added subscribers: hiraditya, tpr.
Herald added a project: All.
rampitec requested review of this revision.
Herald added a subscriber: wdng.
Herald added a project: LLVM.
Compare the relative speed of misaligned accesses before and
after vectorization, rather than just checking that the new
instruction will not be slower.
Since no target currently returns anything but 0 or 1 for the Fast
argument of allowsMisalignedMemoryAccesses, this is still NFCI.
The subsequent patch will tune the actual values of Fast on AMDGPU.
https://reviews.llvm.org/D124218
Files:
llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
Index: llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -187,7 +187,7 @@
/// Check if this load/store access is misaligned accesses.
bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
- Align Alignment);
+ Align Alignment, unsigned &Fast);
};
class LoadStoreVectorizerLegacyPass : public FunctionPass {
@@ -1078,8 +1078,14 @@
InstructionsProcessed->insert(Chain.begin(), Chain.end());
// If the store is going to be misaligned, don't vectorize it.
- if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
+ unsigned Fast;
+ if (accessIsMisaligned(SzInBytes, AS, Alignment, Fast)) {
if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
+ unsigned FastBefore;
+ accessIsMisaligned(EltSzInBytes, AS, Alignment, FastBefore);
+ if (FastBefore > Fast)
+ return false;
+
auto Chains = splitOddVectorElts(Chain, Sz);
bool Vectorized = false;
Vectorized |= vectorizeStoreChain(Chains.first, InstructionsProcessed);
@@ -1231,8 +1237,14 @@
InstructionsProcessed->insert(Chain.begin(), Chain.end());
// If the load is going to be misaligned, don't vectorize it.
- if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
+ unsigned Fast;
+ if (accessIsMisaligned(SzInBytes, AS, Alignment, Fast)) {
if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
+ unsigned FastBefore;
+ accessIsMisaligned(EltSzInBytes, AS, Alignment, FastBefore);
+ if (FastBefore > Fast)
+ return false;
+
auto Chains = splitOddVectorElts(Chain, Sz);
bool Vectorized = false;
Vectorized |= vectorizeLoadChain(Chains.first, InstructionsProcessed);
@@ -1316,11 +1328,11 @@
}
bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
- Align Alignment) {
+ Align Alignment, unsigned &Fast) {
+ Fast = 0;
if (Alignment.value() % SzInBytes == 0)
return false;
- unsigned Fast = false;
bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
SzInBytes * 8, AddressSpace,
Alignment, &Fast);
-------------- next part --------------
A non-text attachment was scrubbed...
Name: D124218.424337.patch
Type: text/x-patch
Size: 2482 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20220422/c7c36d4c/attachment.bin>
More information about the llvm-commits
mailing list