[llvm] 89e767f - [LoopIdiom] Move up atomic checks for memcpy/memmove (NFC) (#124535)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 29 02:21:49 PST 2025
Author: Ryotaro Kasuga
Date: 2025-01-29T19:21:45+09:00
New Revision: 89e767f1277b43862ca417810f23f70596536b81
URL: https://github.com/llvm/llvm-project/commit/89e767f1277b43862ca417810f23f70596536b81
DIFF: https://github.com/llvm/llvm-project/commit/89e767f1277b43862ca417810f23f70596536b81.diff
LOG: [LoopIdiom] Move up atomic checks for memcpy/memmove (NFC) (#124535)
This patch moves up the checks that verify whether it is legal to replace an
atomic load/store with memcpy. Currently, these checks run only after we have
already decided to convert the load/store to memcpy/memmove, which makes the
logic a bit confusing to follow.
This patch is a prelude to #50892
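A minimal standalone sketch of the restructured flow, using hypothetical names
rather than the real LLVM types: the atomic legality checks are hoisted ahead
of the memcpy/memmove selection, so the later emission branch only has to
choose which call to build.

    #include <cstdint>
    #include <optional>

    struct MemOp {
      bool Atomic = false;
      std::optional<uint64_t> Align; // unordered atomic ops always carry an align
    };

    // Returns true if the load/store pair may still be turned into a memcpy.
    bool passesAtomicChecks(const MemOp &Load, const MemOp &Store,
                            uint64_t StoreSize, bool UseMemMove,
                            uint64_t AtomicMaxElementSize) {
      bool IsAtomic = Load.Atomic || Store.Atomic;
      if (!IsAtomic)
        return true;
      // No unordered atomic memmove is supported.
      if (UseMemMove)
        return false;
      // Unordered atomic ops must be at least element-size aligned.
      if (*Store.Align < StoreSize || *Load.Align < StoreSize)
        return false;
      // The element-wise lib call must exist for this element size.
      return StoreSize <= AtomicMaxElementSize;
    }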
Added:
Modified:
llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index c5091e731444efc..2462ec33e0c2022 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1358,7 +1358,29 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
return Changed;
}
+ bool IsAtomic = TheStore->isAtomic() || TheLoad->isAtomic();
bool UseMemMove = IsMemCpy ? Verifier.IsSameObject : LoopAccessStore;
+
+ if (IsAtomic) {
+ // For now don't support unordered atomic memmove.
+ if (UseMemMove)
+ return Changed;
+
+ // We cannot allow unaligned ops for unordered load/store, so reject
+ // anything where the alignment isn't at least the element size.
+ assert((StoreAlign && LoadAlign) &&
+ "Expect unordered load/store to have align.");
+ if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
+ return Changed;
+
+ // If the element.atomic memcpy is not lowered into explicit
+ // loads/stores later, then it will be lowered into an element-size
+ // specific lib call. If the lib call doesn't exist for our store size, then
+ // we shouldn't generate the memcpy.
+ if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
+ return Changed;
+ }
+
if (UseMemMove)
if (!Verifier.loadAndStoreMayFormMemmove(StoreSize, IsNegStride, *TheLoad,
IsMemCpy))
@@ -1387,7 +1409,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
// Check whether to generate an unordered atomic memcpy:
// If the load or store are atomic, then they must necessarily be unordered
// by previous checks.
- if (!TheStore->isAtomic() && !TheLoad->isAtomic()) {
+ if (!IsAtomic) {
if (UseMemMove)
NewCall = Builder.CreateMemMove(
StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
@@ -1398,23 +1420,6 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
NumBytes, /*isVolatile=*/false, AATags.TBAA,
AATags.TBAAStruct, AATags.Scope, AATags.NoAlias);
} else {
- // For now don't support unordered atomic memmove.
- if (UseMemMove)
- return Changed;
- // We cannot allow unaligned ops for unordered load/store, so reject
- // anything where the alignment isn't at least the element size.
- assert((StoreAlign && LoadAlign) &&
- "Expect unordered load/store to have align.");
- if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
- return Changed;
-
- // If the element.atomic memcpy is not lowered into explicit
- // loads/stores later, then it will be lowered into an element-size
- // specific lib call. If the lib call doesn't exist for our store size, then
- // we shouldn't generate the memcpy.
- if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
- return Changed;
-
// Create the call.
// Note that unordered atomic loads/stores are *required* by the spec to
// have an alignment but non-atomic loads/stores may not.
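For context, the unchanged code that follows this hunk (not shown in the diff)
builds the element-wise atomic intrinsic. A rough sketch of such a call,
assuming the variables visible above and IRBuilder's existing
CreateElementUnorderedAtomicMemCpy API; the actual code may differ:

    // Sketch only: emit an element-wise unordered atomic memcpy, copying one
    // element of StoreSize bytes at a time and preserving the AA metadata.
    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
        StoreBasePtr, *StoreAlign, LoadBasePtr, *LoadAlign, NumBytes, StoreSize,
        AATags.TBAA, AATags.TBAAStruct, AATags.Scope, AATags.NoAlias);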