[llvm] X86: Avoid using isArch64Bit for 64-bit checks (PR #157412)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 11 05:36:25 PDT 2025


https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/157412

>From 84f1926c93de52496904e79ae6a6edc354e90ea7 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 8 Sep 2025 17:34:44 +0900
Subject: [PATCH 1/2] X86: Avoid using isArch64Bit for 64-bit checks

Just directly check for x86_64. isArch64Bit only adds extra
steps around the same comparison.
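
For context, the distinction being drawn: Triple::isArch64Bit() answers whether the triple names any 64-bit architecture, while X86-only code only ever needs to know whether the target is x86_64. A minimal sketch (assuming a Triple built from a triple string) of the check the patch standardizes on:

    #include "llvm/TargetParser/Triple.h"
    using namespace llvm;

    static bool isX86_64Target(const Triple &TT) {
      // True only for x86_64 triples. Other 64-bit targets (e.g. aarch64)
      // would also satisfy isArch64Bit(), which X86-specific code never wants.
      return TT.getArch() == Triple::x86_64;
    }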
---
 llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp   |  2 +-
 llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp |  2 +-
 llvm/lib/Target/X86/X86AsmPrinter.cpp                |  7 ++++---
 llvm/lib/Target/X86/X86RegisterInfo.cpp              | 11 ++++++-----
 llvm/lib/Target/X86/X86TargetMachine.cpp             | 11 ++++++-----
 5 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 865fc0ce8101b..f01805919b9bc 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -1286,7 +1286,7 @@ class DarwinX86AsmBackend : public X86AsmBackend {
   DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                       const MCSubtargetInfo &STI)
       : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
-        Is64Bit(TT.isArch64Bit()) {
+        Is64Bit(TT.getArch() == Triple::x86_64) {
     memset(SavedRegs, 0, sizeof(SavedRegs));
     OffsetSize = Is64Bit ? 8 : 4;
     MoveInstrSize = Is64Bit ? 3 : 2;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index bb1e716c33ed5..b663e57b3c759 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -48,7 +48,7 @@ std::string X86_MC::ParseX86Triple(const Triple &TT) {
   std::string FS;
   // SSE2 should default to enabled in 64-bit mode, but can be turned off
   // explicitly.
-  if (TT.isArch64Bit())
+  if (TT.getArch() == Triple::x86_64)
     FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
   else if (TT.getEnvironment() != Triple::CODE16)
     FS = "-64bit-mode,+32bit-mode,-16bit-mode";
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index ff22ee8c86fac..6cad6fdc6b4d4 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -194,7 +194,7 @@ void X86AsmPrinter::emitKCFITypeId(const MachineFunction &MF) {
   if (F.getParent()->getModuleFlag("kcfi-arity")) {
     // The ArityToRegMap assumes the 64-bit SysV ABI.
     [[maybe_unused]] const auto &Triple = MF.getTarget().getTargetTriple();
-    assert(Triple.isArch64Bit() && !Triple.isOSWindows());
+    assert(Triple.getArch() == Triple::x86_64 && !Triple.isOSWindows());
 
     // Determine the function's arity (i.e., the number of arguments) at the ABI
     // level by counting the number of parameters that are passed
@@ -897,7 +897,7 @@ void X86AsmPrinter::emitStartOfAsmFile(Module &M) {
 
     if (FeatureFlagsAnd) {
       // Emit a .note.gnu.property section with the flags.
-      assert((TT.isArch32Bit() || TT.isArch64Bit()) &&
+      assert((TT.isArch32Bit() || TT.getArch() == Triple::x86_64) &&
              "CFProtection used on invalid architecture!");
       MCSection *Cur = OutStreamer->getCurrentSectionOnly();
       MCSection *Nt = MMI->getContext().getELFSection(
@@ -905,7 +905,8 @@ void X86AsmPrinter::emitStartOfAsmFile(Module &M) {
       OutStreamer->switchSection(Nt);
 
       // Emitting note header.
-      const int WordSize = TT.isArch64Bit() && !TT.isX32() ? 8 : 4;
+      const int WordSize =
+          TT.getArch() == Triple::x86_64 && !TT.isX32() ? 8 : 4;
       emitAlignment(WordSize == 4 ? Align(4) : Align(8));
       OutStreamer->emitIntValue(4, 4 /*size*/); // data size for "GNU\0"
       OutStreamer->emitIntValue(8 + WordSize, 4 /*size*/); // Elf_Prop size
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 9ec04e740a08b..455a0df4084d0 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -53,14 +53,15 @@ static cl::opt<bool>
 extern cl::opt<bool> X86EnableAPXForRelocation;
 
 X86RegisterInfo::X86RegisterInfo(const Triple &TT)
-    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
-                         X86_MC::getDwarfRegFlavour(TT, false),
-                         X86_MC::getDwarfRegFlavour(TT, true),
-                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
+    : X86GenRegisterInfo(
+          (TT.getArch() == Triple::x86_64 ? X86::RIP : X86::EIP),
+          X86_MC::getDwarfRegFlavour(TT, false),
+          X86_MC::getDwarfRegFlavour(TT, true),
+          (TT.getArch() == Triple::x86_64 ? X86::RIP : X86::EIP)) {
   X86_MC::initLLVMToSEHAndCVRegMapping(this);
 
   // Cache some information.
-  Is64Bit = TT.isArch64Bit();
+  Is64Bit = TT.getArch() == Triple::x86_64;
   IsWin64 = Is64Bit && TT.isOSWindows();
   IsUEFI64 = Is64Bit && TT.isUEFI();
 
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 6d9c6cdedd9e5..5f76e2a550f40 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -131,7 +131,7 @@ static std::string computeDataLayout(const Triple &TT) {
 
   Ret += DataLayout::getManglingComponent(TT);
   // X86 and x32 have 32 bit pointers.
-  if (!TT.isArch64Bit() || TT.isX32())
+  if (TT.getArch() != Triple::x86_64 || TT.isX32())
     Ret += "-p:32:32";
 
   // Address spaces for 32 bit signed, 32 bit unsigned, and 64 bit pointers.
@@ -140,7 +140,7 @@ static std::string computeDataLayout(const Triple &TT) {
   // Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
   // 128 bit integers are not specified in the 32-bit ABIs but are used
   // internally for lowering f128, so we match the alignment to that.
-  if (TT.isArch64Bit() || TT.isOSWindows())
+  if (TT.getArch() == Triple::x86_64 || TT.isOSWindows())
     Ret += "-i64:64-i128:128";
   else if (TT.isOSIAMCU())
     Ret += "-i64:32-f64:32";
@@ -150,7 +150,8 @@ static std::string computeDataLayout(const Triple &TT) {
   // Some ABIs align long double to 128 bits, others to 32.
   if (TT.isOSIAMCU())
     ; // No f80
-  else if (TT.isArch64Bit() || TT.isOSDarwin() || TT.isWindowsMSVCEnvironment())
+  else if (TT.getArch() == Triple::x86_64 || TT.isOSDarwin() ||
+           TT.isWindowsMSVCEnvironment())
     Ret += "-f80:128";
   else
     Ret += "-f80:32";
@@ -159,13 +160,13 @@ static std::string computeDataLayout(const Triple &TT) {
     Ret += "-f128:32";
 
   // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
-  if (TT.isArch64Bit())
+  if (TT.getArch() == Triple::x86_64)
     Ret += "-n8:16:32:64";
   else
     Ret += "-n8:16:32";
 
   // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
-  if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
+  if ((TT.getArch() != Triple::x86_64 && TT.isOSWindows()) || TT.isOSIAMCU())
     Ret += "-a:0:32-S32";
   else
     Ret += "-S128";

>From 82cedd665537555be843ca2bb92026c701298416 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Thu, 11 Sep 2025 21:24:16 +0900
Subject: [PATCH 2/2] Add Triple::isX86_32/isX86_64 helpers and use them
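
The second patch adds Triple::isX86_32()/isX86_64() accessors and switches the X86 backend over to them. A minimal usage sketch (hypothetical triple strings, just to illustrate the new helpers):

    #include "llvm/TargetParser/Triple.h"
    using namespace llvm;

    void example() {
      Triple T64("x86_64-unknown-linux-gnu");
      Triple T32("i686-pc-windows-msvc");
      // Equivalent to getArch() == Triple::x86_64 / Triple::x86, respectively.
      bool Is64 = T64.isX86_64(); // true
      bool Is32 = T32.isX86_32(); // true
      (void)Is64; (void)Is32;
    }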

---
 llvm/include/llvm/TargetParser/Triple.h       |  6 +++++
 .../Target/X86/MCTargetDesc/X86AsmBackend.cpp |  4 +--
 .../Target/X86/MCTargetDesc/X86MCAsmInfo.cpp  |  8 +++---
 .../X86/MCTargetDesc/X86MCTargetDesc.cpp      | 11 ++++----
 llvm/lib/Target/X86/X86AsmPrinter.cpp         |  9 +++----
 llvm/lib/Target/X86/X86IndirectThunks.cpp     |  4 +--
 llvm/lib/Target/X86/X86RegisterInfo.cpp       | 11 ++++----
 llvm/lib/Target/X86/X86ReturnThunks.cpp       |  2 +-
 llvm/lib/Target/X86/X86TargetMachine.cpp      | 27 +++++++++----------
 9 files changed, 42 insertions(+), 40 deletions(-)

diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h
index c8fa482a9a4f4..d0a8f36061822 100644
--- a/llvm/include/llvm/TargetParser/Triple.h
+++ b/llvm/include/llvm/TargetParser/Triple.h
@@ -1103,6 +1103,12 @@ class Triple {
     return getArch() == Triple::x86 || getArch() == Triple::x86_64;
   }
 
+  /// Tests whether the target is x86 (32-bit).
+  bool isX86_32() const { return getArch() == Triple::x86; }
+
+  /// Tests whether the target is x86 (64-bit).
+  bool isX86_64() const { return getArch() == Triple::x86_64; }
+
   /// Tests whether the target is VE
   bool isVE() const {
     return getArch() == Triple::ve;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index f01805919b9bc..74de51c7eb1cc 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -559,7 +559,7 @@ void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS,
 std::optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
   if (STI.getTargetTriple().isOSBinFormatELF()) {
     unsigned Type;
-    if (STI.getTargetTriple().getArch() == Triple::x86_64) {
+    if (STI.getTargetTriple().isX86_64()) {
       Type = llvm::StringSwitch<unsigned>(Name)
 #define ELF_RELOC(X, Y) .Case(#X, Y)
 #include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
@@ -1286,7 +1286,7 @@ class DarwinX86AsmBackend : public X86AsmBackend {
   DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                       const MCSubtargetInfo &STI)
       : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
-        Is64Bit(TT.getArch() == Triple::x86_64) {
+        Is64Bit(TT.isX86_64()) {
     memset(SavedRegs, 0, sizeof(SavedRegs));
     OffsetSize = Is64Bit ? 8 : 4;
     MoveInstrSize = Is64Bit ? 3 : 2;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index d6915383faf83..7306a08ff088e 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -68,7 +68,7 @@ const MCAsmInfo::AtSpecifier atSpecifiers[] = {
 void X86MCAsmInfoDarwin::anchor() { }
 
 X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) {
-  bool is64Bit = T.getArch() == Triple::x86_64;
+  bool is64Bit = T.isX86_64();
   if (is64Bit)
     CodePointerSize = CalleeSaveStackSlotSize = 8;
 
@@ -113,7 +113,7 @@ X86_64MCAsmInfoDarwin::X86_64MCAsmInfoDarwin(const Triple &Triple)
 void X86ELFMCAsmInfo::anchor() { }
 
 X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
-  bool is64Bit = T.getArch() == Triple::x86_64;
+  bool is64Bit = T.isX86_64();
   bool isX32 = T.isX32();
 
   // For ELF, x86-64 pointer size depends on the ABI.
@@ -149,7 +149,7 @@ X86_64MCAsmInfoDarwin::getExprForPersonalitySymbol(const MCSymbol *Sym,
 void X86MCAsmInfoMicrosoft::anchor() { }
 
 X86MCAsmInfoMicrosoft::X86MCAsmInfoMicrosoft(const Triple &Triple) {
-  if (Triple.getArch() == Triple::x86_64) {
+  if (Triple.isX86_64()) {
     PrivateGlobalPrefix = ".L";
     PrivateLabelPrefix = ".L";
     CodePointerSize = 8;
@@ -189,7 +189,7 @@ void X86MCAsmInfoGNUCOFF::anchor() { }
 X86MCAsmInfoGNUCOFF::X86MCAsmInfoGNUCOFF(const Triple &Triple) {
   assert((Triple.isOSWindows() || Triple.isUEFI()) &&
          "Windows and UEFI are the only supported COFF targets");
-  if (Triple.getArch() == Triple::x86_64) {
+  if (Triple.isX86_64()) {
     PrivateGlobalPrefix = ".L";
     PrivateLabelPrefix = ".L";
     CodePointerSize = 8;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index b663e57b3c759..d8cb4eacf905c 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -48,7 +48,7 @@ std::string X86_MC::ParseX86Triple(const Triple &TT) {
   std::string FS;
   // SSE2 should default to enabled in 64-bit mode, but can be turned off
   // explicitly.
-  if (TT.getArch() == Triple::x86_64)
+  if (TT.isX86_64())
     FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
   else if (TT.getEnvironment() != Triple::CODE16)
     FS = "-64bit-mode,+32bit-mode,-16bit-mode";
@@ -59,7 +59,7 @@ std::string X86_MC::ParseX86Triple(const Triple &TT) {
 }
 
 unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
-  if (TT.getArch() == Triple::x86_64)
+  if (TT.isX86_64())
     return DWARFFlavour::X86_64;
 
   if (TT.isOSDarwin())
@@ -407,9 +407,8 @@ static MCInstrInfo *createX86MCInstrInfo() {
 }
 
 static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
-  unsigned RA = (TT.getArch() == Triple::x86_64)
-                    ? X86::RIP  // Should have dwarf #16.
-                    : X86::EIP; // Should have dwarf #8.
+  unsigned RA = TT.isX86_64() ? X86::RIP  // Should have dwarf #16.
+                              : X86::EIP; // Should have dwarf #8.
 
   MCRegisterInfo *X = new MCRegisterInfo();
   InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
@@ -421,7 +420,7 @@ static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
 static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                      const Triple &TheTriple,
                                      const MCTargetOptions &Options) {
-  bool is64Bit = TheTriple.getArch() == Triple::x86_64;
+  bool is64Bit = TheTriple.isX86_64();
 
   MCAsmInfo *MAI;
   if (TheTriple.isOSBinFormatMachO()) {
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 6cad6fdc6b4d4..51c13798ddde3 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -194,7 +194,7 @@ void X86AsmPrinter::emitKCFITypeId(const MachineFunction &MF) {
   if (F.getParent()->getModuleFlag("kcfi-arity")) {
     // The ArityToRegMap assumes the 64-bit SysV ABI.
     [[maybe_unused]] const auto &Triple = MF.getTarget().getTargetTriple();
-    assert(Triple.getArch() == Triple::x86_64 && !Triple.isOSWindows());
+    assert(Triple.isX86_64() && !Triple.isOSWindows());
 
     // Determine the function's arity (i.e., the number of arguments) at the ABI
     // level by counting the number of parameters that are passed
@@ -897,7 +897,7 @@ void X86AsmPrinter::emitStartOfAsmFile(Module &M) {
 
     if (FeatureFlagsAnd) {
       // Emit a .note.gnu.property section with the flags.
-      assert((TT.isArch32Bit() || TT.getArch() == Triple::x86_64) &&
+      assert((TT.isX86_32() || TT.isX86_64()) &&
              "CFProtection used on invalid architecture!");
       MCSection *Cur = OutStreamer->getCurrentSectionOnly();
       MCSection *Nt = MMI->getContext().getELFSection(
@@ -905,8 +905,7 @@ void X86AsmPrinter::emitStartOfAsmFile(Module &M) {
       OutStreamer->switchSection(Nt);
 
       // Emitting note header.
-      const int WordSize =
-          TT.getArch() == Triple::x86_64 && !TT.isX32() ? 8 : 4;
+      const int WordSize = TT.isX86_64() && !TT.isX32() ? 8 : 4;
       emitAlignment(WordSize == 4 ? Align(4) : Align(8));
       OutStreamer->emitIntValue(4, 4 /*size*/); // data size for "GNU\0"
       OutStreamer->emitIntValue(8 + WordSize, 4 /*size*/); // Elf_Prop size
@@ -1090,7 +1089,7 @@ void X86AsmPrinter::emitEndOfAsmFile(Module &M) {
   }
 
   // Emit __morestack address if needed for indirect calls.
-  if (TT.getArch() == Triple::x86_64 && TM.getCodeModel() == CodeModel::Large) {
+  if (TT.isX86_64() && TM.getCodeModel() == CodeModel::Large) {
     if (MCSymbol *AddrSymbol = OutContext.lookupSymbol("__morestack_addr")) {
       Align Alignment(1);
       MCSection *ReadOnlySection = getObjFileLowering().getSectionForConstant(
diff --git a/llvm/lib/Target/X86/X86IndirectThunks.cpp b/llvm/lib/Target/X86/X86IndirectThunks.cpp
index c5a5e6e621ffe..adac7b3572e7c 100644
--- a/llvm/lib/Target/X86/X86IndirectThunks.cpp
+++ b/llvm/lib/Target/X86/X86IndirectThunks.cpp
@@ -115,7 +115,7 @@ bool RetpolineThunkInserter::insertThunks(MachineModuleInfo &MMI,
                                           bool ExistingThunks) {
   if (ExistingThunks)
     return false;
-  if (MMI.getTarget().getTargetTriple().getArch() == Triple::x86_64)
+  if (MMI.getTarget().getTargetTriple().isX86_64())
     createThunkFunction(MMI, R11RetpolineName);
   else
     for (StringRef Name : {EAXRetpolineName, ECXRetpolineName, EDXRetpolineName,
@@ -125,7 +125,7 @@ bool RetpolineThunkInserter::insertThunks(MachineModuleInfo &MMI,
 }
 
 void RetpolineThunkInserter::populateThunk(MachineFunction &MF) {
-  bool Is64Bit = MF.getTarget().getTargetTriple().getArch() == Triple::x86_64;
+  bool Is64Bit = MF.getTarget().getTargetTriple().isX86_64();
   Register ThunkReg;
   if (Is64Bit) {
     assert(MF.getName() == "__llvm_retpoline_r11" &&
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 455a0df4084d0..83a33eb5faaa6 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -53,15 +53,14 @@ static cl::opt<bool>
 extern cl::opt<bool> X86EnableAPXForRelocation;
 
 X86RegisterInfo::X86RegisterInfo(const Triple &TT)
-    : X86GenRegisterInfo(
-          (TT.getArch() == Triple::x86_64 ? X86::RIP : X86::EIP),
-          X86_MC::getDwarfRegFlavour(TT, false),
-          X86_MC::getDwarfRegFlavour(TT, true),
-          (TT.getArch() == Triple::x86_64 ? X86::RIP : X86::EIP)) {
+    : X86GenRegisterInfo((TT.isX86_64() ? X86::RIP : X86::EIP),
+                         X86_MC::getDwarfRegFlavour(TT, false),
+                         X86_MC::getDwarfRegFlavour(TT, true),
+                         (TT.isX86_64() ? X86::RIP : X86::EIP)) {
   X86_MC::initLLVMToSEHAndCVRegMapping(this);
 
   // Cache some information.
-  Is64Bit = TT.getArch() == Triple::x86_64;
+  Is64Bit = TT.isX86_64();
   IsWin64 = Is64Bit && TT.isOSWindows();
   IsUEFI64 = Is64Bit && TT.isUEFI();
 
diff --git a/llvm/lib/Target/X86/X86ReturnThunks.cpp b/llvm/lib/Target/X86/X86ReturnThunks.cpp
index c40b4f371fb31..279373b724ace 100644
--- a/llvm/lib/Target/X86/X86ReturnThunks.cpp
+++ b/llvm/lib/Target/X86/X86ReturnThunks.cpp
@@ -68,7 +68,7 @@ bool X86ReturnThunks::runOnMachineFunction(MachineFunction &MF) {
     return Modified;
 
   const auto &ST = MF.getSubtarget<X86Subtarget>();
-  const bool Is64Bit = ST.getTargetTriple().getArch() == Triple::x86_64;
+  const bool Is64Bit = ST.getTargetTriple().isX86_64();
   const unsigned RetOpc = Is64Bit ? X86::RET64 : X86::RET32;
   SmallVector<MachineInstr *, 16> Rets;
 
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 5f76e2a550f40..c23ae9619aac4 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -112,7 +112,7 @@ extern "C" LLVM_C_ABI void LLVMInitializeX86Target() {
 
 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
   if (TT.isOSBinFormatMachO()) {
-    if (TT.getArch() == Triple::x86_64)
+    if (TT.isX86_64())
       return std::make_unique<X86_64MachoTargetObjectFile>();
     return std::make_unique<TargetLoweringObjectFileMachO>();
   }
@@ -120,7 +120,7 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
   if (TT.isOSBinFormatCOFF())
     return std::make_unique<TargetLoweringObjectFileCOFF>();
 
-  if (TT.getArch() == Triple::x86_64)
+  if (TT.isX86_64())
     return std::make_unique<X86_64ELFTargetObjectFile>();
   return std::make_unique<X86ELFTargetObjectFile>();
 }
@@ -131,7 +131,7 @@ static std::string computeDataLayout(const Triple &TT) {
 
   Ret += DataLayout::getManglingComponent(TT);
   // X86 and x32 have 32 bit pointers.
-  if (TT.getArch() != Triple::x86_64 || TT.isX32())
+  if (TT.isX86_32() || TT.isX32())
     Ret += "-p:32:32";
 
   // Address spaces for 32 bit signed, 32 bit unsigned, and 64 bit pointers.
@@ -140,7 +140,7 @@ static std::string computeDataLayout(const Triple &TT) {
   // Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
   // 128 bit integers are not specified in the 32-bit ABIs but are used
   // internally for lowering f128, so we match the alignment to that.
-  if (TT.getArch() == Triple::x86_64 || TT.isOSWindows())
+  if (TT.isX86_64() || TT.isOSWindows())
     Ret += "-i64:64-i128:128";
   else if (TT.isOSIAMCU())
     Ret += "-i64:32-f64:32";
@@ -150,8 +150,7 @@ static std::string computeDataLayout(const Triple &TT) {
   // Some ABIs align long double to 128 bits, others to 32.
   if (TT.isOSIAMCU())
     ; // No f80
-  else if (TT.getArch() == Triple::x86_64 || TT.isOSDarwin() ||
-           TT.isWindowsMSVCEnvironment())
+  else if (TT.isX86_64() || TT.isOSDarwin() || TT.isWindowsMSVCEnvironment())
     Ret += "-f80:128";
   else
     Ret += "-f80:32";
@@ -160,13 +159,13 @@ static std::string computeDataLayout(const Triple &TT) {
     Ret += "-f128:32";
 
   // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
-  if (TT.getArch() == Triple::x86_64)
+  if (TT.isX86_64())
     Ret += "-n8:16:32:64";
   else
     Ret += "-n8:16:32";
 
   // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
-  if ((TT.getArch() != Triple::x86_64 && TT.isOSWindows()) || TT.isOSIAMCU())
+  if ((TT.isX86_32() && TT.isOSWindows()) || TT.isOSIAMCU())
     Ret += "-a:0:32-S32";
   else
     Ret += "-S128";
@@ -176,7 +175,7 @@ static std::string computeDataLayout(const Triple &TT) {
 
 static Reloc::Model getEffectiveRelocModel(const Triple &TT, bool JIT,
                                            std::optional<Reloc::Model> RM) {
-  bool is64Bit = TT.getArch() == Triple::x86_64;
+  bool is64Bit = TT.isX86_64();
   if (!RM) {
     // JIT codegen should use static relocations by default, since it's
     // typically executed in process and not relocatable.
@@ -218,7 +217,7 @@ static Reloc::Model getEffectiveRelocModel(const Triple &TT, bool JIT,
 static CodeModel::Model
 getEffectiveX86CodeModel(const Triple &TT, std::optional<CodeModel::Model> CM,
                          bool JIT) {
-  bool Is64Bit = TT.getArch() == Triple::x86_64;
+  bool Is64Bit = TT.isX86_64();
   if (CM) {
     if (*CM == CodeModel::Tiny)
       reportFatalUsageError("target does not support the tiny CodeModel");
@@ -489,7 +488,7 @@ void X86PassConfig::addIRPasses() {
   // Add Control Flow Guard checks.
   const Triple &TT = TM->getTargetTriple();
   if (TT.isOSWindows()) {
-    if (TT.getArch() == Triple::x86_64) {
+    if (TT.isX86_64()) {
       addPass(createCFGuardDispatchPass());
     } else {
       addPass(createCFGuardCheckPass());
@@ -548,7 +547,7 @@ bool X86PassConfig::addILPOpts() {
 bool X86PassConfig::addPreISel() {
   // Only add this pass for 32-bit x86 Windows.
   const Triple &TT = TM->getTargetTriple();
-  if (TT.isOSWindows() && TT.getArch() == Triple::x86)
+  if (TT.isOSWindows() && TT.isX86_32())
     addPass(createX86WinEHStatePass());
   return true;
 }
@@ -637,7 +636,7 @@ void X86PassConfig::addPreEmitPass2() {
 
   // Insert extra int3 instructions after trailing call instructions to avoid
   // issues in the unwinder.
-  if (TT.isOSWindows() && TT.getArch() == Triple::x86_64)
+  if (TT.isOSWindows() && TT.isX86_64())
     addPass(createX86AvoidTrailingCallPass());
 
   // Verify basic block incoming and outgoing cfa offset and register values and
@@ -674,7 +673,7 @@ void X86PassConfig::addPreEmitPass2() {
 
   // Analyzes and emits pseudos to support Win x64 Unwind V2. This pass must run
   // after all real instructions have been added to the epilog.
-  if (TT.isOSWindows() && (TT.getArch() == Triple::x86_64))
+  if (TT.isOSWindows() && TT.isX86_64())
     addPass(createX86WinEHUnwindV2Pass());
 }
 


