[llvm] c466345 - [LoadStoreVectorizer] Consider if operation is faster than before
Stanislav Mekhanoshin via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 28 15:52:49 PST 2022
Author: Stanislav Mekhanoshin
Date: 2022-11-28T15:52:32-08:00
New Revision: c46634554d507b6b764eeba881ed4eef3b28baac
URL: https://github.com/llvm/llvm-project/commit/c46634554d507b6b764eeba881ed4eef3b28baac
DIFF: https://github.com/llvm/llvm-project/commit/c46634554d507b6b764eeba881ed4eef3b28baac.diff
LOG: [LoadStoreVectorizer] Consider if operation is faster than before
Compare the relative speed of misaligned accesses before and
after vectorization, instead of just checking that the new
instruction is not going to be slower.

Since no target currently returns anything but 0 or 1 for the Fast
argument of allowsMisalignedMemoryAccesses, this is still NFCI.
The subsequent patch will tune the actual values of Fast on AMDGPU.
Differential Revision: https://reviews.llvm.org/D124218
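
To make the new check concrete, here is a self-contained sketch of the
decision flow this patch introduces. It is not the actual
LoadStoreVectorizer code: allowsMisaligned() is a toy stand-in for
TTI::allowsMisalignedMemoryAccesses and its speed scale is an assumption;
only accessIsMisaligned() and the caller-side comparison mirror the real
logic.

// Toy target query: report whether a misaligned access of SzInBytes with
// the given alignment is legal and how fast it is (0 = slow, larger = faster).
static bool allowsMisaligned(unsigned SzInBytes, unsigned AlignInBytes,
                             unsigned &RelativeSpeed) {
  // Toy scale only: anything no wider than 8 bytes is tolerable when misaligned.
  RelativeSpeed = SzInBytes <= 8 ? 1 : 0;
  return true;
}

// Mirrors Vectorizer::accessIsMisaligned: true means "not naturally aligned
// and not considered fast"; RelativeSpeed is always set so that two queries
// for the same alignment can be compared.
static bool accessIsMisaligned(unsigned SzInBytes, unsigned AlignInBytes,
                               unsigned &RelativeSpeed) {
  RelativeSpeed = 0;
  if (AlignInBytes % SzInBytes == 0)
    return false;
  bool Allows = allowsMisaligned(SzInBytes, AlignInBytes, RelativeSpeed);
  return !Allows || !RelativeSpeed;
}

// Caller-side pattern from vectorizeLoadChain/vectorizeStoreChain (the
// alloca-address-space check and the splitOddVectorElts retry are elided):
// returns false only when the element-sized accesses before vectorization
// would have been faster than the misaligned vectorized access.
static bool worthPursuing(unsigned SzInBytes, unsigned EltSzInBytes,
                          unsigned AlignInBytes) {
  unsigned RelativeSpeed;
  if (!accessIsMisaligned(SzInBytes, AlignInBytes, RelativeSpeed))
    return true; // aligned or fast enough: vectorize the whole chain
  unsigned SpeedBefore;
  accessIsMisaligned(EltSzInBytes, AlignInBytes, SpeedBefore);
  // True: fall back to splitting the chain and retrying, as before.
  // False: the accesses were faster before vectorization, so give up.
  return SpeedBefore <= RelativeSpeed;
}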
Added:
Modified:
llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index a0380518015b2..0b7fc853dc1b0 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -186,8 +186,11 @@ class Vectorizer {
SmallPtrSet<Instruction *, 16> *InstructionsProcessed);
/// Check if this load/store access is misaligned accesses.
+ /// Returns a \p RelativeSpeed of an operation if allowed suitable to
+ /// compare to another result for the same \p AddressSpace and potentially
+ /// different \p Alignment and \p SzInBytes.
bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
- Align Alignment);
+ Align Alignment, unsigned &RelativeSpeed);
};
class LoadStoreVectorizerLegacyPass : public FunctionPass {
@@ -1078,8 +1081,14 @@ bool Vectorizer::vectorizeStoreChain(
InstructionsProcessed->insert(Chain.begin(), Chain.end());
// If the store is going to be misaligned, don't vectorize it.
- if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
+ unsigned RelativeSpeed;
+ if (accessIsMisaligned(SzInBytes, AS, Alignment, RelativeSpeed)) {
if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
+ unsigned SpeedBefore;
+ accessIsMisaligned(EltSzInBytes, AS, Alignment, SpeedBefore);
+ if (SpeedBefore > RelativeSpeed)
+ return false;
+
auto Chains = splitOddVectorElts(Chain, Sz);
bool Vectorized = false;
Vectorized |= vectorizeStoreChain(Chains.first, InstructionsProcessed);
@@ -1231,8 +1240,14 @@ bool Vectorizer::vectorizeLoadChain(
InstructionsProcessed->insert(Chain.begin(), Chain.end());
// If the load is going to be misaligned, don't vectorize it.
- if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
+ unsigned RelativeSpeed;
+ if (accessIsMisaligned(SzInBytes, AS, Alignment, RelativeSpeed)) {
if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
+ unsigned SpeedBefore;
+ accessIsMisaligned(EltSzInBytes, AS, Alignment, SpeedBefore);
+ if (SpeedBefore > RelativeSpeed)
+ return false;
+
auto Chains = splitOddVectorElts(Chain, Sz);
bool Vectorized = false;
Vectorized |= vectorizeLoadChain(Chains.first, InstructionsProcessed);
@@ -1316,15 +1331,15 @@ bool Vectorizer::vectorizeLoadChain(
}
bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
- Align Alignment) {
+ Align Alignment, unsigned &RelativeSpeed) {
+ RelativeSpeed = 0;
if (Alignment.value() % SzInBytes == 0)
return false;
- unsigned Fast = 0;
bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
SzInBytes * 8, AddressSpace,
- Alignment, &Fast);
+ Alignment, &RelativeSpeed);
LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
- << " and fast? " << Fast << "\n";);
- return !Allows || !Fast;
+ << " with relative speed = " << RelativeSpeed << '\n';);
+ return !Allows || !RelativeSpeed;
}
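
As the message notes, every in-tree target currently reports only 0 or 1
through the Fast out-parameter, so the SpeedBefore comparison changes no
decisions yet; it becomes meaningful once a target grades its answers. A
purely hypothetical grading (made-up tiers and numbers, not the actual
AMDGPU tuning the follow-up patch applies) could look like:

// Hypothetical speed grading a target might report through the
// Fast/RelativeSpeed out-parameter; illustrative values only.
unsigned misalignedSpeed(unsigned SzInBytes, unsigned AlignInBytes) {
  if (AlignInBytes % SzInBytes == 0)
    return 3;            // naturally aligned: full speed
  if (SzInBytes <= 4)
    return 2;            // narrow misaligned access: tolerable
  return 0;              // wide misaligned access: treated as slow
}

With values like these, a chain of byte-aligned i32 loads would report
speed 2 while any wider replacement reports 0, so the vectorizer now leaves
the chain alone instead of splitting it and re-querying ever smaller widths.
Only the zero/nonzero distinction and the relative ordering of the values
matter here; the absolute scale is left to the target.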