[llvm] efa94cf - [Support/rpmalloc] Updated fake atomics with Interlocked functions (#148303)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 15 11:42:30 PDT 2025
Author: Dmitry Vasilyev
Date: 2025-07-15T22:42:27+04:00
New Revision: efa94cf703a510083f411444650f3a8d21dfcfee
URL: https://github.com/llvm/llvm-project/commit/efa94cf703a510083f411444650f3a8d21dfcfee
DIFF: https://github.com/llvm/llvm-project/commit/efa94cf703a510083f411444650f3a8d21dfcfee.diff
LOG: [Support/rpmalloc] Updated fake atomics with Interlocked functions (#148303)
Most of the atomic helpers already used Interlocked functions when building with MSVC (since MSVC does not support C11 atomics yet), but a few load/store functions were plain, non-atomic accesses.
Use Interlocked functions for these atomics as well to ensure they are thread-safe.
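For reference, a minimal sketch (not the committed code) of the pattern this change applies: an OR with 0 is a locked read-modify-write that leaves the value unchanged, so it serves as an atomic load, and an exchange serves as an atomic store. The "demo_" names are illustrative, not from rpmalloc.c.

/* Sketch only: MSVC Interlocked-based atomic load/store for a 32-bit value. */
#include <windows.h>
#include <stdint.h>

typedef volatile long demo_atomic32_t;

static __forceinline int32_t demo_load32(demo_atomic32_t *src) {
  /* OR with 0 does not change the value but is performed as a locked
   * read-modify-write, so the read is atomic and fully ordered. */
  return (int32_t)InterlockedOr(src, 0);
}

static __forceinline void demo_store32(demo_atomic32_t *dst, int32_t val) {
  /* Exchange performs the store atomically with a full barrier. */
  InterlockedExchange(dst, (long)val);
}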
This PR fixes #146205.
LLVM currently requires VS 2019 version 16.7, and we would eventually need VS 2022 to use <stdatomic.h> in rpmalloc and similar code. In the meantime, we use the Interlocked intrinsics when building with MSVC.
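A hedged sketch of the compile-time split described above: use C11 <stdatomic.h> where the compiler provides it, otherwise fall back to the Interlocked APIs. The names below are illustrative, not rpmalloc's actual non-MSVC path; shown here for a pointer load.

/* Sketch only: pick a real C11 atomic load when available, otherwise a
 * compare-exchange with matching compare/exchange values, which reads
 * the pointer atomically without changing it. */
#if defined(_MSC_VER) && !defined(__clang__)
#include <windows.h>
typedef void *volatile demo_atomicptr_t;
static __forceinline void *demo_load_ptr(demo_atomicptr_t *src) {
  return InterlockedCompareExchangePointer(src, 0, 0);
}
#else
#include <stdatomic.h>
typedef _Atomic(void *) demo_atomicptr_t;
static inline void *demo_load_ptr(demo_atomicptr_t *src) {
  return atomic_load_explicit(src, memory_order_relaxed);
}
#endif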
Added:
Modified:
llvm/lib/Support/rpmalloc/rpmalloc.c
Removed:
################################################################################
diff --git a/llvm/lib/Support/rpmalloc/rpmalloc.c b/llvm/lib/Support/rpmalloc/rpmalloc.c
index a06d3cdb5b52e..6f8b29e31e8ca 100644
--- a/llvm/lib/Support/rpmalloc/rpmalloc.c
+++ b/llvm/lib/Support/rpmalloc/rpmalloc.c
@@ -275,9 +275,11 @@ typedef volatile long atomic32_t;
typedef volatile long long atomic64_t;
typedef volatile void *atomicptr_t;
-static FORCEINLINE int32_t atomic_load32(atomic32_t *src) { return *src; }
+static FORCEINLINE int32_t atomic_load32(atomic32_t *src) {
+ return (int32_t)InterlockedOr(src, 0);
+}
static FORCEINLINE void atomic_store32(atomic32_t *dst, int32_t val) {
- *dst = val;
+ InterlockedExchange(dst, val);
}
static FORCEINLINE int32_t atomic_incr32(atomic32_t *val) {
return (int32_t)InterlockedIncrement(val);
@@ -293,20 +295,22 @@ static FORCEINLINE int atomic_cas32_acquire(atomic32_t *dst, int32_t val,
return (InterlockedCompareExchange(dst, val, ref) == ref) ? 1 : 0;
}
static FORCEINLINE void atomic_store32_release(atomic32_t *dst, int32_t val) {
- *dst = val;
+ InterlockedExchange(dst, val);
+}
+static FORCEINLINE int64_t atomic_load64(atomic64_t *src) {
+ return (int64_t)InterlockedOr64(src, 0);
}
-static FORCEINLINE int64_t atomic_load64(atomic64_t *src) { return *src; }
static FORCEINLINE int64_t atomic_add64(atomic64_t *val, int64_t add) {
return (int64_t)InterlockedExchangeAdd64(val, add) + add;
}
static FORCEINLINE void *atomic_load_ptr(atomicptr_t *src) {
- return (void *)*src;
+ return InterlockedCompareExchangePointer(src, 0, 0);
}
static FORCEINLINE void atomic_store_ptr(atomicptr_t *dst, void *val) {
- *dst = val;
+ InterlockedExchangePointer(dst, val);
}
static FORCEINLINE void atomic_store_ptr_release(atomicptr_t *dst, void *val) {
- *dst = val;
+ InterlockedExchangePointer(dst, val);
}
static FORCEINLINE void *atomic_exchange_ptr_acquire(atomicptr_t *dst,
void *val) {