[llvm] X86: Avoid using isArch64Bit for 64-bit checks (PR #157412)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 8 02:53:24 PDT 2025
https://github.com/arsenm created https://github.com/llvm/llvm-project/pull/157412
Check for x86_64 directly. isArch64Bit only adds extra
steps around that check.
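For reference, a minimal sketch of the two forms (the helper name is
illustrative, and the comment on isArch64Bit reflects my reading of
llvm/lib/TargetParser/Triple.cpp, not anything this patch changes):

  #include "llvm/TargetParser/Triple.h"

  // Illustrative helper only; the patch open-codes this at each call site.
  static bool isX86_64(const llvm::Triple &TT) {
    // Old form: TT.isArch64Bit() answers "is the pointer width 64 bits" by
    // mapping the arch through getArchPointerBitWidth(), an indirection we
    // don't need when the question is really "is this x86_64".
    // New form: compare the arch enum directly.
    return TT.getArch() == llvm::Triple::x86_64;
  }

For the triples the X86 backend actually sees (x86 and x86_64, including the
x32 variants of x86_64), the two checks should give the same answer, so this
is intended as a simplification rather than a functional change.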
From 885e8e680aa31f0d30cdd2f41c19167991a0b4fd Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 8 Sep 2025 17:34:44 +0900
Subject: [PATCH] X86: Avoid using isArch64Bit for 64-bit checks
Check for x86_64 directly. isArch64Bit only adds extra
steps around that check.
---
llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp | 2 +-
llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp | 2 +-
llvm/lib/Target/X86/X86AsmPrinter.cpp | 7 ++++---
llvm/lib/Target/X86/X86RegisterInfo.cpp | 11 ++++++-----
llvm/lib/Target/X86/X86TargetMachine.cpp | 11 ++++++-----
5 files changed, 18 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 865fc0ce8101b..f01805919b9bc 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -1286,7 +1286,7 @@ class DarwinX86AsmBackend : public X86AsmBackend {
DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI)
: X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
- Is64Bit(TT.isArch64Bit()) {
+ Is64Bit(TT.getArch() == Triple::x86_64) {
memset(SavedRegs, 0, sizeof(SavedRegs));
OffsetSize = Is64Bit ? 8 : 4;
MoveInstrSize = Is64Bit ? 3 : 2;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index bb1e716c33ed5..b663e57b3c759 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -48,7 +48,7 @@ std::string X86_MC::ParseX86Triple(const Triple &TT) {
std::string FS;
// SSE2 should default to enabled in 64-bit mode, but can be turned off
// explicitly.
- if (TT.isArch64Bit())
+ if (TT.getArch() == Triple::x86_64)
FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
else if (TT.getEnvironment() != Triple::CODE16)
FS = "-64bit-mode,+32bit-mode,-16bit-mode";
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index d406277e440bb..5ea3ed062f363 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -194,7 +194,7 @@ void X86AsmPrinter::emitKCFITypeId(const MachineFunction &MF) {
if (F.getParent()->getModuleFlag("kcfi-arity")) {
// The ArityToRegMap assumes the 64-bit SysV ABI.
[[maybe_unused]] const auto &Triple = MF.getTarget().getTargetTriple();
- assert(Triple.isArch64Bit() && !Triple.isOSWindows());
+ assert(Triple.getArch() == Triple::x86_64 && !Triple.isOSWindows());
// Determine the function's arity (i.e., the number of arguments) at the ABI
// level by counting the number of parameters that are passed
@@ -896,7 +896,7 @@ void X86AsmPrinter::emitStartOfAsmFile(Module &M) {
if (FeatureFlagsAnd) {
// Emit a .note.gnu.property section with the flags.
- assert((TT.isArch32Bit() || TT.isArch64Bit()) &&
+ assert((TT.isArch32Bit() || TT.getArch() == Triple::x86_64) &&
"CFProtection used on invalid architecture!");
MCSection *Cur = OutStreamer->getCurrentSectionOnly();
MCSection *Nt = MMI->getContext().getELFSection(
@@ -904,7 +904,8 @@ void X86AsmPrinter::emitStartOfAsmFile(Module &M) {
OutStreamer->switchSection(Nt);
// Emitting note header.
- const int WordSize = TT.isArch64Bit() && !TT.isX32() ? 8 : 4;
+ const int WordSize =
+ TT.getArch() == Triple::x86_64 && !TT.isX32() ? 8 : 4;
emitAlignment(WordSize == 4 ? Align(4) : Align(8));
OutStreamer->emitIntValue(4, 4 /*size*/); // data size for "GNU\0"
OutStreamer->emitIntValue(8 + WordSize, 4 /*size*/); // Elf_Prop size
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index b79e508df7c97..5c3a67d90a994 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -53,14 +53,15 @@ static cl::opt<bool>
extern cl::opt<bool> X86EnableAPXForRelocation;
X86RegisterInfo::X86RegisterInfo(const Triple &TT)
- : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
- X86_MC::getDwarfRegFlavour(TT, false),
- X86_MC::getDwarfRegFlavour(TT, true),
- (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
+ : X86GenRegisterInfo(
+ (TT.getArch() == Triple::x86_64 ? X86::RIP : X86::EIP),
+ X86_MC::getDwarfRegFlavour(TT, false),
+ X86_MC::getDwarfRegFlavour(TT, true),
+ (TT.getArch() == Triple::x86_64 ? X86::RIP : X86::EIP)) {
X86_MC::initLLVMToSEHAndCVRegMapping(this);
// Cache some information.
- Is64Bit = TT.isArch64Bit();
+ Is64Bit = TT.getArch() == Triple::x86_64;
IsWin64 = Is64Bit && TT.isOSWindows();
IsUEFI64 = Is64Bit && TT.isUEFI();
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 6d9c6cdedd9e5..5f76e2a550f40 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -131,7 +131,7 @@ static std::string computeDataLayout(const Triple &TT) {
Ret += DataLayout::getManglingComponent(TT);
// X86 and x32 have 32 bit pointers.
- if (!TT.isArch64Bit() || TT.isX32())
+ if (TT.getArch() != Triple::x86_64 || TT.isX32())
Ret += "-p:32:32";
// Address spaces for 32 bit signed, 32 bit unsigned, and 64 bit pointers.
@@ -140,7 +140,7 @@ static std::string computeDataLayout(const Triple &TT) {
// Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
// 128 bit integers are not specified in the 32-bit ABIs but are used
// internally for lowering f128, so we match the alignment to that.
- if (TT.isArch64Bit() || TT.isOSWindows())
+ if (TT.getArch() == Triple::x86_64 || TT.isOSWindows())
Ret += "-i64:64-i128:128";
else if (TT.isOSIAMCU())
Ret += "-i64:32-f64:32";
@@ -150,7 +150,8 @@ static std::string computeDataLayout(const Triple &TT) {
// Some ABIs align long double to 128 bits, others to 32.
if (TT.isOSIAMCU())
; // No f80
- else if (TT.isArch64Bit() || TT.isOSDarwin() || TT.isWindowsMSVCEnvironment())
+ else if (TT.getArch() == Triple::x86_64 || TT.isOSDarwin() ||
+ TT.isWindowsMSVCEnvironment())
Ret += "-f80:128";
else
Ret += "-f80:32";
@@ -159,13 +160,13 @@ static std::string computeDataLayout(const Triple &TT) {
Ret += "-f128:32";
// The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
- if (TT.isArch64Bit())
+ if (TT.getArch() == Triple::x86_64)
Ret += "-n8:16:32:64";
else
Ret += "-n8:16:32";
// The stack is aligned to 32 bits on some ABIs and 128 bits on others.
- if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
+ if ((TT.getArch() != Triple::x86_64 && TT.isOSWindows()) || TT.isOSIAMCU())
Ret += "-a:0:32-S32";
else
Ret += "-S128";