[clang] [llvm] Use unaligned atomic loads and stores on x86 (PR #79191)
via cfe-commits
cfe-commits at lists.llvm.org
Tue Jan 23 11:07:47 PST 2024
https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/79191
From 4716a62d936ee174b6ffd0a4a7f9f7fbc5f2ae0f Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Tue, 23 Jan 2024 13:59:05 -0500
Subject: [PATCH] Use unaligned atomic loads and stores on x86
The x86 backend now supports unaligned atomic loads and stores, so Clang can emit them directly instead of falling back to a function call and a mutex. (A minimal sketch of such an access follows the diffstat.)
---
clang/lib/CodeGen/CGObjC.cpp | 5 +++--
llvm/lib/Target/X86/X86ISelLowering.cpp | 1 +
2 files changed, 4 insertions(+), 2 deletions(-)
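For context, here is a minimal, hedged sketch (not part of the patch) of the kind of access this concerns: an atomic load whose address is not naturally aligned, such as a packed struct field. Without backend support, the access has to be lowered through a runtime libcall.

#include <cstdint>
#include <cstdio>

// Hypothetical example: 'v' sits at byte offset 1, so a 4-byte atomic
// access to it is misaligned.
struct __attribute__((packed)) Packed {
  char c;
  uint32_t v;
};

int main() {
  Packed p{0, 42};
  // Clang/GCC builtin; the access inherits the (mis)alignment of the
  // packed field, producing an unaligned atomic load.
  uint32_t x = __atomic_load_n(&p.v, __ATOMIC_SEQ_CST);
  std::printf("%u\n", x);
  return 0;
}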
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index 03fc0ec7ff54e1..debfc84f49e484 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -846,8 +846,9 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
- // FIXME: Allow unaligned atomic load/store on x86. (It is not
- // currently supported by the backend.)
+ // x86 and x86-64 are the only architectures known to support this so far.
+ if (arch == llvm::Triple::x86 || arch == llvm::Triple::x86_64)
+ return true;
return false;
}
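A standalone sketch of the predicate after this hunk (names mirror the patch; the enum is a stand-in for llvm::Triple::ArchType, for illustration only):

#include <cassert>

// Stand-in for llvm::Triple::ArchType.
enum class ArchType { x86, x86_64, aarch64 };

// Mirrors the patched predicate: only x86 and x86-64 report support
// for unaligned atomic loads and stores.
static bool hasUnalignedAtomics(ArchType arch) {
  return arch == ArchType::x86 || arch == ArchType::x86_64;
}

int main() {
  assert(hasUnalignedAtomics(ArchType::x86));
  assert(hasUnalignedAtomics(ArchType::x86_64));
  assert(!hasUnalignedAtomics(ArchType::aarch64));
  return 0;
}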
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e158312caffdec..9b5128cc136114 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -107,6 +107,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setSchedulingPreference(Sched::RegPressure);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
+ setSupportsUnalignedAtomics(true);
// Bypass expensive divides and use cheaper ones.
if (TM.getOptLevel() >= CodeGenOptLevel::Default) {
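setSupportsUnalignedAtomics sets a TargetLowering flag that is queried via supportsUnalignedAtomics(); roughly, atomic expansion keeps a misaligned atomic access native when the flag is set instead of turning it into an __atomic_* libcall. A simplified standalone model of that decision (stub types of an assumed shape, not the real LLVM classes):

#include <cstdio>

// Simplified stand-in for TargetLoweringBase.
struct TargetLoweringStub {
  bool SupportsUnalignedAtomics = false;
  void setSupportsUnalignedAtomics(bool V) { SupportsUnalignedAtomics = V; }
  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
};

// Roughly how atomic expansion decides: a misaligned atomic access needs
// a runtime libcall only when the target lacks unaligned atomic support.
static bool needsLibcall(const TargetLoweringStub &TLI, unsigned AlignBytes,
                         unsigned SizeBytes) {
  return AlignBytes < SizeBytes && !TLI.supportsUnalignedAtomics();
}

int main() {
  TargetLoweringStub TLI;
  TLI.setSupportsUnalignedAtomics(true); // mirrors the patched line
  // A 4-byte atomic at 1-byte alignment now stays native (prints 0).
  std::printf("%d\n", needsLibcall(TLI, /*AlignBytes=*/1, /*SizeBytes=*/4));
  return 0;
}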