[compiler-rt] 558ab65 - [scudo] Select stricter atomic memory_order in MemMapFuchsia
Fabio D'Urso via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 4 09:31:17 PDT 2023
Author: Fabio D'Urso
Date: 2023-08-04T18:22:44+02:00
New Revision: 558ab653257fc8b5c1a499ddeb4319821cc170da
URL: https://github.com/llvm/llvm-project/commit/558ab653257fc8b5c1a499ddeb4319821cc170da
DIFF: https://github.com/llvm/llvm-project/commit/558ab653257fc8b5c1a499ddeb4319821cc170da.diff
LOG: [scudo] Select stricter atomic memory_order in MemMapFuchsia
Previously, all atomic operations were relaxed, except for the
atomic_compare_exchange call, which was implicitly acquire (and no longer
compiled after the changes in commit
3ef766addadd8324f58c0fda0301edcde2185cb3).
In addition to making MemMapFuchsia compile again, this CL selects
memory_order values that better express the intent of the code.
Reviewed By: Chia-hungDuan
Differential Revision: https://reviews.llvm.org/D157097
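
For context, the change adopts an acquire/release lazy-caching pattern. The
sketch below is illustrative only: it uses std::atomic and a hypothetical
queryRootVmarBase() stub instead of scudo's internal atomic_uptr/atomic_load
helpers, but it models the same ordering.

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for the one-time zx_object_get_info() query.
static uintptr_t queryRootVmarBase() { return 0x1000; }

static uintptr_t getCachedBase() {
  static std::atomic<uintptr_t> Cached{0};
  // Acquire: if another thread already published the value, also observe
  // any writes it made before its release store.
  uintptr_t Result = Cached.load(std::memory_order_acquire);
  if (Result == 0) {
    Result = queryRootVmarBase();
    // Release: publish the computed value. Racing threads may each compute
    // and store it, which is harmless because the value is identical.
    Cached.store(Result, std::memory_order_release);
  }
  return Result;
}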
Added:
Modified:
compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
Removed:
################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
index 9ace1fef7ad4a2..0566ab0655263e 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
+++ b/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
@@ -41,7 +41,7 @@ static void setVmoName(zx_handle_t Vmo, const char *Name) {
static uptr getRootVmarBase() {
static atomic_uptr CachedResult = {0};
- uptr Result = atomic_load_relaxed(&CachedResult);
+ uptr Result = atomic_load(&CachedResult, memory_order_acquire);
if (UNLIKELY(!Result)) {
zx_info_vmar_t VmarInfo;
zx_status_t Status =
@@ -50,7 +50,7 @@ static uptr getRootVmarBase() {
CHECK_EQ(Status, ZX_OK);
CHECK_NE(VmarInfo.base, 0);
- atomic_store_relaxed(&CachedResult, VmarInfo.base);
+ atomic_store(&CachedResult, VmarInfo.base, memory_order_release);
Result = VmarInfo.base;
}
@@ -61,7 +61,7 @@ static uptr getRootVmarBase() {
static zx_handle_t getPlaceholderVmo() {
static atomic_u32 StoredVmo = {ZX_HANDLE_INVALID};
- zx_handle_t Vmo = atomic_load_relaxed(&StoredVmo);
+ zx_handle_t Vmo = atomic_load(&StoredVmo, memory_order_acquire);
if (UNLIKELY(Vmo == ZX_HANDLE_INVALID)) {
// Create a zero-sized placeholder VMO.
zx_status_t Status = _zx_vmo_create(0, 0, &Vmo);
@@ -72,9 +72,9 @@ static zx_handle_t getPlaceholderVmo() {
// Atomically store its handle. If some other thread wins the race, use its
// handle and discard ours.
- zx_handle_t OldValue =
- atomic_compare_exchange(&StoredVmo, ZX_HANDLE_INVALID, Vmo);
- if (OldValue != ZX_HANDLE_INVALID) {
+ zx_handle_t OldValue = atomic_compare_exchange_strong(
+ &StoredVmo, ZX_HANDLE_INVALID, Vmo, memory_order_acq_rel);
+ if (UNLIKELY(OldValue != ZX_HANDLE_INVALID)) {
Status = _zx_handle_close(Vmo);
CHECK_EQ(Status, ZX_OK);
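
The compare-exchange hunk above implements a "first writer wins" race. A
minimal sketch with standard C++ atomics (createResource/destroyResource are
hypothetical stand-ins for _zx_vmo_create/_zx_handle_close; scudo itself uses
its own atomic wrappers):

#include <atomic>
#include <cstdint>

using Handle = uint32_t;
constexpr Handle kInvalid = 0;

static Handle createResource() { return 42; } // hypothetical stand-in
static void destroyResource(Handle) {}        // hypothetical stand-in

static Handle getSharedResource() {
  static std::atomic<Handle> Stored{kInvalid};
  Handle H = Stored.load(std::memory_order_acquire);
  if (H == kInvalid) {
    H = createResource();
    Handle Expected = kInvalid;
    // acq_rel: on success, publish our handle (release); on failure, acquire
    // the handle published by the winning thread.
    if (!Stored.compare_exchange_strong(Expected, H,
                                        std::memory_order_acq_rel)) {
      // Another thread won the race: discard our handle and use theirs.
      destroyResource(H);
      H = Expected;
    }
  }
  return H;
}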