[llvm] UEFI backend for x86_64 (PR #109320)

via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 18 15:50:29 PDT 2025


https://github.com/Prabhuk updated https://github.com/llvm/llvm-project/pull/109320

>From 07417dd0bf81bc06a80de5d88e55752a767e1516 Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Mon, 23 Dec 2024 14:38:34 -0800
Subject: [PATCH 01/10] UEFI backend for x86_64

---
 .../X86/MCTargetDesc/X86MCTargetDesc.cpp      |  4 +-
 llvm/lib/Target/X86/X86CallingConv.td         | 17 ++++++++-
 llvm/lib/Target/X86/X86FrameLowering.cpp      | 20 ++++++++++
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 37 ++++++++++++-------
 llvm/lib/Target/X86/X86RegisterInfo.cpp       | 11 +++---
 llvm/lib/Target/X86/X86RegisterInfo.h         |  6 ++-
 llvm/lib/Target/X86/X86Subtarget.h            | 15 ++++++--
 llvm/lib/Target/X86/X86TargetMachine.cpp      |  2 +-
 llvm/test/CodeGen/X86/mangle-question-mark.ll |  1 +
 llvm/test/CodeGen/X86/sse-regcall.ll          |  1 +
 llvm/test/CodeGen/X86/win32-preemption.ll     |  2 +
 11 files changed, 88 insertions(+), 28 deletions(-)

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 1c4d68d5448d6..f552ca329a4af 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -444,7 +444,7 @@ static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
     // Force the use of an ELF container.
     MAI = new X86ELFMCAsmInfo(TheTriple);
   } else if (TheTriple.isWindowsMSVCEnvironment() ||
-             TheTriple.isWindowsCoreCLREnvironment()) {
+             TheTriple.isWindowsCoreCLREnvironment() || TheTriple.isUEFI()) {
     if (Options.getAssemblyLanguage().equals_insensitive("masm"))
       MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple);
     else
@@ -452,8 +452,6 @@ static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
   } else if (TheTriple.isOSCygMing() ||
              TheTriple.isWindowsItaniumEnvironment()) {
     MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
-  } else if (TheTriple.isUEFI()) {
-    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
   } else {
     // The default is ELF.
     MAI = new X86ELFMCAsmInfo(TheTriple);
diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 91af111db8cda..9315ed422a456 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -488,13 +488,24 @@ def RetCC_X86_64 : CallingConv<[
     CCIfSubtarget<"isTargetWin64()", CCIfRegCallv4<CCDelegateTo<RetCC_X86_Win64_RegCallv4>>>>,
 
   CCIfCC<"CallingConv::X86_RegCall",
-          CCIfSubtarget<"isTargetWin64()",
+    CCIfSubtarget<"isTargetUEFI64()", CCIfRegCallv4<CCDelegateTo<RetCC_X86_Win64_RegCallv4>>>>,
+
+  CCIfCC<"CallingConv::X86_RegCall",
+          CCIfSubtarget<"isTargetWin64()",                        
                         CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
+
+  CCIfCC<"CallingConv::X86_RegCall",
+        CCIfSubtarget<"isTargetUEFI64()",
+                      CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
+
   CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_SysV64_RegCall>>,
           
   // Mingw64 and native Win64 use Win64 CC
   CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
 
+  // UEFI64 uses Win64 CC
+  CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<RetCC_X86_Win64_C>>,
+
   // Otherwise, drop to normal X86-64 CC
   CCDelegateTo<RetCC_X86_64_C>
 ]>;
@@ -1079,6 +1090,10 @@ def CC_X86_64 : CallingConv<[
     CCIfSubtarget<"isTargetWin64()", CCIfRegCallv4<CCDelegateTo<CC_X86_Win64_RegCallv4>>>>,
   CCIfCC<"CallingConv::X86_RegCall",
     CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
+  CCIfCC<"CallingConv::X86_RegCall",
+    CCIfSubtarget<"isTargetUEFI64()", CCIfRegCallv4<CCDelegateTo<CC_X86_Win64_RegCallv4>>>>,
+  CCIfCC<"CallingConv::X86_RegCall",
+    CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
   CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_SysV64_RegCall>>,
   CCIfCC<"CallingConv::PreserveNone", CCDelegateTo<CC_X86_64_Preserve_None>>,
   CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index 4d40c23eb5617..68c12e616b6e0 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1533,11 +1533,16 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
                                     MachineBasicBlock &MBB) const {
   assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
          "MF used frame lowering for wrong subtarget");
+
   MachineBasicBlock::iterator MBBI = MBB.begin();
   MachineFrameInfo &MFI = MF.getFrameInfo();
   const Function &Fn = MF.getFunction();
+
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
   uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
+
+  // errs() << "********** MaxAlign size " << MaxAlign;
+
   uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
   bool IsFunclet = MBB.isEHFuncletEntry();
   EHPersonality Personality = EHPersonality::Unknown;
@@ -1548,6 +1553,12 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
   bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
   bool HasFP = hasFP(MF);
   bool IsWin64Prologue = isWin64Prologue(MF);
+
+  // if(IsWin64Prologue) {
+  //   errs() << "********** IsWin64Prologue TRUE ";
+  // } else {
+  //   errs() << "********** IsWin64Prologue FALSE FALSE FALSE ";
+  // }
   bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
   // FIXME: Emit FPO data for EH funclets.
   bool NeedsWinFPO = !IsFunclet && STI.isTargetWin32() &&
@@ -1671,6 +1682,12 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
   // pointer, calls, or dynamic alloca then we do not need to adjust the
   // stack pointer (we fit in the Red Zone). We also check that we don't
   // push and pop from the stack.
+
+  // if (has128ByteRedZone(MF)) {
+  //   errs() << "********** has128ByteRedZone TRUE ";
+  // } else {
+  //   errs() << "********** has128ByteRedZone FALSE FALSE FALSE ";
+  // }
   if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) &&
       !MFI.hasVarSizedObjects() &&             // No dynamic alloca.
       !MFI.adjustsStack() &&                   // No calls.
@@ -1679,6 +1696,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
       !MF.shouldSplitStack()) {                // Regular stack
     uint64_t MinSize =
         X86FI->getCalleeSavedFrameSize() - X86FI->getTCReturnAddrDelta();
+
     if (HasFP)
       MinSize += SlotSize;
     X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
@@ -1894,7 +1912,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
            Opc == X86::PUSH2 || Opc == X86::PUSH2P;
   };
 
+  // uint64_t cont3 = 1;
   while (IsCSPush(MBBI)) {
+    // llvm::outs() << "\n*********** cont3 " << cont3++;
     PushedRegs = true;
     Register Reg = MBBI->getOperand(0).getReg();
     LastCSPush = MBBI;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3b260a89911c4..be45cec15740c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -578,6 +578,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   // FIXME - use subtarget debug flags
   if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
       !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
+      !Subtarget.isTargetUEFI64() &&
       TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
     setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
   }
@@ -2543,7 +2544,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
   }
 
-  if (Subtarget.isTargetWin64()) {
+  if (Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) {
     setOperationAction(ISD::SDIV, MVT::i128, Custom);
     setOperationAction(ISD::UDIV, MVT::i128, Custom);
     setOperationAction(ISD::SREM, MVT::i128, Custom);
@@ -19656,7 +19657,8 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
   else if (isLegalConversion(SrcVT, true, Subtarget))
     return Op;
 
-  if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
+  if ((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
+      SrcVT == MVT::i128)
     return LowerWin64_INT128_TO_FP(Op, DAG);
 
   if (SDValue Extract = vectorizeExtractedCast(Op, dl, DAG, Subtarget))
@@ -20163,7 +20165,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
   if (DstVT.isVector())
     return lowerUINT_TO_FP_vec(Op, dl, DAG, Subtarget);
 
-  if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
+  if ((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
+      SrcVT == MVT::i128)
     return LowerWin64_INT128_TO_FP(Op, DAG);
 
   if (SDValue Extract = vectorizeExtractedCast(Op, dl, DAG, Subtarget))
@@ -27880,7 +27883,6 @@ Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
                      .Case("r14", X86::R14)
                      .Case("r15", X86::R15)
                      .Default(0);
-
   if (Reg == X86::EBP || Reg == X86::RBP) {
     if (!TFI.hasFP(MF))
       report_fatal_error("register " + StringRef(RegName) +
@@ -27924,7 +27926,7 @@ Register X86TargetLowering::getExceptionSelectorRegister(
 }
 
 bool X86TargetLowering::needsFixedCatchObjects() const {
-  return Subtarget.isTargetWin64();
+  return Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64();
 }
 
 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
@@ -29515,7 +29517,8 @@ static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget,
 }
 
 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
-  assert(Subtarget.isTargetWin64() && "Unexpected target");
+  assert((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
+         "Unexpected target");
   EVT VT = Op.getValueType();
   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
          "Unexpected return type for lowering");
@@ -29582,7 +29585,8 @@ SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) cons
 SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    SDValue &Chain) const {
-  assert(Subtarget.isTargetWin64() && "Unexpected target");
+  assert((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
+         "Unexpected target");
   EVT VT = Op.getValueType();
   bool IsStrict = Op->isStrictFPOpcode();
 
@@ -29615,7 +29619,8 @@ SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
 
 SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
                                                    SelectionDAG &DAG) const {
-  assert(Subtarget.isTargetWin64() && "Unexpected target");
+  assert((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
+         "Unexpected target");
   EVT VT = Op.getValueType();
   bool IsStrict = Op->isStrictFPOpcode();
 
@@ -33861,7 +33866,8 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
       return;
     }
 
-    if (VT == MVT::i128 && Subtarget.isTargetWin64()) {
+    if (VT == MVT::i128 &&
+        (Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64())) {
       SDValue Chain;
       SDValue V = LowerWin64_FP_TO_INT128(SDValue(N, 0), DAG, Chain);
       Results.push_back(V);
@@ -37136,6 +37142,8 @@ X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
   // N.B. the order the invoke BBs are processed in doesn't matter here.
   SmallVector<MachineBasicBlock *, 64> MBBLPads;
   const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
+  // llvm::outs() << "Callee saved regs from isellowering " <<
+  // SavedRegs->
   for (MachineBasicBlock *MBB : InvokeBBs) {
     // Remove the landing pad successor from the invoke block and replace it
     // with the new dispatch block.
@@ -60753,8 +60761,8 @@ bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
 /// Returns true if stack probing through inline assembly is requested.
 bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
 
-  // No inline stack probe for Windows, they have their own mechanism.
-  if (Subtarget.isOSWindows() ||
+  // No inline stack probe for Windows and UEFI, they have their own mechanism.
+  if (Subtarget.isOSWindows() || Subtarget.isUEFI() ||
       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
     return false;
 
@@ -60778,9 +60786,10 @@ X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {
   if (MF.getFunction().hasFnAttribute("probe-stack"))
     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
 
-  // Generally, if we aren't on Windows, the platform ABI does not include
-  // support for stack probes, so don't emit them.
-  if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
+  // Generally, if we aren't on Windows or UEFI, the platform ABI does not
+  // include support for stack probes, so don't emit them.
+  if (!(Subtarget.isOSWindows() || Subtarget.isUEFI()) ||
+      Subtarget.isTargetMachO() ||
       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
     return "";
 
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 164d420595516..0e797f286411e 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -60,6 +60,7 @@ X86RegisterInfo::X86RegisterInfo(const Triple &TT)
   // Cache some information.
   Is64Bit = TT.isArch64Bit();
   IsWin64 = Is64Bit && TT.isOSWindows();
+  IsUEFI64 = Is64Bit && TT.isUEFI();
 
   // Use a callee-saved register as the base pointer.  These registers must
   // not conflict with any ABI requirements.  For example, in 32-bit mode PIC
@@ -242,7 +243,7 @@ bool X86RegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
 const TargetRegisterClass *
 X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
   const Function &F = MF.getFunction();
-  if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
+  if (IsWin64 || IsUEFI64 || (F.getCallingConv() == CallingConv::Win64))
     return &X86::GR64_TCW64RegClass;
   else if (Is64Bit)
     return &X86::GR64_TCRegClass;
@@ -344,7 +345,7 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   }
   case CallingConv::X86_RegCall:
     if (Is64Bit) {
-      if (IsWin64) {
+      if (IsWin64 || IsUEFI64) {
         return (HasSSE ? CSR_Win64_RegCall_SaveList :
                          CSR_Win64_RegCall_NoSSE_SaveList);
       } else {
@@ -404,7 +405,7 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
       return IsWin64 ? CSR_Win64_SwiftError_SaveList
                      : CSR_64_SwiftError_SaveList;
 
-    if (IsWin64)
+    if (IsWin64 || IsUEFI64)
       return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;
     if (CallsEHReturn)
       return CSR_64EHRet_SaveList;
@@ -471,7 +472,7 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
   }
   case CallingConv::X86_RegCall:
     if (Is64Bit) {
-      if (IsWin64) {
+      if (IsWin64 || IsUEFI64) {
         return (HasSSE ? CSR_Win64_RegCall_RegMask :
                          CSR_Win64_RegCall_NoSSE_RegMask);
       } else {
@@ -529,7 +530,7 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
     if (IsSwiftCC)
       return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
 
-    return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
+    return (IsWin64 || IsUEFI64) ? CSR_Win64_RegMask : CSR_64_RegMask;
   }
 
   return CSR_32_RegMask;
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.h b/llvm/lib/Target/X86/X86RegisterInfo.h
index 68ee372f27b14..0405137d28c6c 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -27,10 +27,14 @@ class X86RegisterInfo final : public X86GenRegisterInfo {
   ///
   bool Is64Bit;
 
-  /// IsWin64 - Is the target on of win64 flavours
+  /// IsWin64 - Is the target one of win64 flavours
   ///
   bool IsWin64;
 
+  /// IsUEFI64 - Is this UEFI 64 bit target
+  ///
+  bool IsUEFI64;
+
   /// SlotSize - Stack slot size in bytes.
   ///
   unsigned SlotSize;
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index e3cb9ee8ce190..2ea6ddcbf5bac 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -321,7 +321,11 @@ class X86Subtarget final : public X86GenSubtargetInfo {
 
   bool isOSWindows() const { return TargetTriple.isOSWindows(); }
 
-  bool isTargetWin64() const { return Is64Bit && isOSWindows(); }
+  bool isUEFI() const { return TargetTriple.isUEFI(); }
+
+  bool isTargetUEFI64() const { return Is64Bit && isUEFI(); }
+
+  bool isTargetWin64() const { return Is64Bit && (isOSWindows() || isUEFI()); }
 
   bool isTargetWin32() const { return !Is64Bit && isOSWindows(); }
 
@@ -335,8 +339,11 @@ class X86Subtarget final : public X86GenSubtargetInfo {
   bool isPositionIndependent() const;
 
   bool isCallingConvWin64(CallingConv::ID CC) const {
+    // llvm::outs() << "\nIn isCallingConvWin64 check calling
+    // convention******************* ";
     switch (CC) {
-    // On Win64, all these conventions just use the default convention.
+    // On Win64 and UEFI64, all these conventions just use the default
+    // convention.
     case CallingConv::C:
     case CallingConv::Fast:
     case CallingConv::Tail:
@@ -347,9 +354,11 @@ class X86Subtarget final : public X86GenSubtargetInfo {
     case CallingConv::X86_ThisCall:
     case CallingConv::X86_VectorCall:
     case CallingConv::Intel_OCL_BI:
-      return isTargetWin64();
+      return isTargetWin64() || isTargetUEFI64();
     // This convention allows using the Win64 convention on other targets.
     case CallingConv::Win64:
+      // llvm::outs() << "\nReturning true for Win64 calling
+      // convention******************* ";
       return true;
     // This convention allows using the SysV convention on Windows targets.
     case CallingConv::X86_64_SysV:
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 20dfdd27b33df..64d170d259b50 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -484,7 +484,7 @@ void X86PassConfig::addIRPasses() {
 
   // Add Control Flow Guard checks.
   const Triple &TT = TM->getTargetTriple();
-  if (TT.isOSWindows()) {
+  if (TT.isOSWindows() || TT.isUEFI()) {
     if (TT.getArch() == Triple::x86_64) {
       addPass(createCFGuardDispatchPass());
     } else {
diff --git a/llvm/test/CodeGen/X86/mangle-question-mark.ll b/llvm/test/CodeGen/X86/mangle-question-mark.ll
index fea62ecad97c9..15fdae34bc4c3 100644
--- a/llvm/test/CodeGen/X86/mangle-question-mark.ll
+++ b/llvm/test/CodeGen/X86/mangle-question-mark.ll
@@ -2,6 +2,7 @@
 
 ; RUN: llc -mtriple i686-pc-win32 < %s | FileCheck %s --check-prefix=COFF
 ; RUN: llc -mtriple x86_64-pc-win32 < %s | FileCheck %s --check-prefix=COFF64
+; RUN: llc -mtriple x86_64-unknown-uefi < %s | FileCheck %s --check-prefix=COFF64
 ; RUN: llc -mtriple i686-linux-gnu < %s | FileCheck %s --check-prefix=ELF
 ; RUN: llc -mtriple i686-apple-darwin < %s | FileCheck %s --check-prefix=MACHO
 
diff --git a/llvm/test/CodeGen/X86/sse-regcall.ll b/llvm/test/CodeGen/X86/sse-regcall.ll
index 6f0293392eef2..923b31c882047 100644
--- a/llvm/test/CodeGen/X86/sse-regcall.ll
+++ b/llvm/test/CodeGen/X86/sse-regcall.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i386-pc-win32 -mattr=+sse | FileCheck --check-prefix=WIN32 %s
 ; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse | FileCheck --check-prefix=WIN64 %s
+; RUN: llc < %s -mtriple=x86_64-unknown-uefi -mattr=+sse | FileCheck --check-prefix=WIN64 %s
 ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+sse | FileCheck --check-prefix=LINUXOSX %s
 
 ; Test regcall when receiving/returning i1
diff --git a/llvm/test/CodeGen/X86/win32-preemption.ll b/llvm/test/CodeGen/X86/win32-preemption.ll
index 77dcfa7280daf..139a1e514556e 100644
--- a/llvm/test/CodeGen/X86/win32-preemption.ll
+++ b/llvm/test/CodeGen/X86/win32-preemption.ll
@@ -4,6 +4,8 @@
 ; RUN:     -relocation-model=pic     < %s | FileCheck --check-prefix=COFF %s
 ; RUN: llc -mtriple x86_64-pc-win32 \
 ; RUN:  -relocation-model=dynamic-no-pic < %s | FileCheck --check-prefix=COFF %s
+; RUN: llc -mtriple x86_64-unknown-uefi \
+; RUN:  -relocation-model=dynamic-no-pic < %s | FileCheck --check-prefix=COFF %s
 
 
 ; 32 bits

>From eb5b6e000520af98443273f5c5636d712f80fda6 Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Tue, 7 Jan 2025 13:38:12 -0800
Subject: [PATCH 02/10] Remove debug prints to console.

---
 llvm/lib/Target/X86/X86FrameLowering.cpp | 16 +---------------
 llvm/lib/Target/X86/X86ISelLowering.cpp  |  2 --
 llvm/lib/Target/X86/X86Subtarget.h       |  4 ----
 3 files changed, 1 insertion(+), 21 deletions(-)

diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index 68c12e616b6e0..d66a53b7900f2 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1554,12 +1554,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
   bool HasFP = hasFP(MF);
   bool IsWin64Prologue = isWin64Prologue(MF);
 
-  // if(IsWin64Prologue) {
-  //   errs() << "********** IsWin64Prologue TRUE ";
-  // } else {
-  //   errs() << "********** IsWin64Prologue FALSE FALSE FALSE ";
-  // }
-  bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
+    bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
   // FIXME: Emit FPO data for EH funclets.
   bool NeedsWinFPO = !IsFunclet && STI.isTargetWin32() &&
                      MF.getFunction().getParent()->getCodeViewFlag();
@@ -1682,12 +1677,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
   // pointer, calls, or dynamic alloca then we do not need to adjust the
   // stack pointer (we fit in the Red Zone). We also check that we don't
   // push and pop from the stack.
-
-  // if (has128ByteRedZone(MF)) {
-  //   errs() << "********** has128ByteRedZone TRUE ";
-  // } else {
-  //   errs() << "********** has128ByteRedZone FALSE FALSE FALSE ";
-  // }
   if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) &&
       !MFI.hasVarSizedObjects() &&             // No dynamic alloca.
       !MFI.adjustsStack() &&                   // No calls.
@@ -1696,7 +1685,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
       !MF.shouldSplitStack()) {                // Regular stack
     uint64_t MinSize =
         X86FI->getCalleeSavedFrameSize() - X86FI->getTCReturnAddrDelta();
-
     if (HasFP)
       MinSize += SlotSize;
     X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
@@ -1912,9 +1900,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
            Opc == X86::PUSH2 || Opc == X86::PUSH2P;
   };
 
-  // uint64_t cont3 = 1;
   while (IsCSPush(MBBI)) {
-    // llvm::outs() << "\n*********** cont3 " << cont3++;
     PushedRegs = true;
     Register Reg = MBBI->getOperand(0).getReg();
     LastCSPush = MBBI;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 688da63f5b1c2..bbd4a30e7e0a9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -37230,8 +37230,6 @@ X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
   // N.B. the order the invoke BBs are processed in doesn't matter here.
   SmallVector<MachineBasicBlock *, 64> MBBLPads;
   const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
-  // llvm::outs() << "Callee saved regs from isellowering " <<
-  // SavedRegs->
   for (MachineBasicBlock *MBB : InvokeBBs) {
     // Remove the landing pad successor from the invoke block and replace it
     // with the new dispatch block.
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index 2ea6ddcbf5bac..b5a6545f7e466 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -339,8 +339,6 @@ class X86Subtarget final : public X86GenSubtargetInfo {
   bool isPositionIndependent() const;
 
   bool isCallingConvWin64(CallingConv::ID CC) const {
-    // llvm::outs() << "\nIn isCallingConvWin64 check calling
-    // convention******************* ";
     switch (CC) {
     // On Win64 and UEFI64, all these conventions just use the default
     // convention.
@@ -357,8 +355,6 @@ class X86Subtarget final : public X86GenSubtargetInfo {
       return isTargetWin64() || isTargetUEFI64();
     // This convention allows using the Win64 convention on other targets.
     case CallingConv::Win64:
-      // llvm::outs() << "\nReturning true for Win64 calling
-      // convention******************* ";
       return true;
     // This convention allows using the SysV convention on Windows targets.
     case CallingConv::X86_64_SysV:

>From f295cecb616ad680bcafd19bff02702e2208d779 Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Tue, 7 Jan 2025 13:41:25 -0800
Subject: [PATCH 03/10] Remove commented out code.

---
 llvm/lib/Target/X86/X86FrameLowering.cpp | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index d66a53b7900f2..4d40c23eb5617 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1533,16 +1533,11 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
                                     MachineBasicBlock &MBB) const {
   assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
          "MF used frame lowering for wrong subtarget");
-
   MachineBasicBlock::iterator MBBI = MBB.begin();
   MachineFrameInfo &MFI = MF.getFrameInfo();
   const Function &Fn = MF.getFunction();
-
   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
   uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
-
-  // errs() << "********** MaxAlign size " << MaxAlign;
-
   uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
   bool IsFunclet = MBB.isEHFuncletEntry();
   EHPersonality Personality = EHPersonality::Unknown;
@@ -1553,8 +1548,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
   bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
   bool HasFP = hasFP(MF);
   bool IsWin64Prologue = isWin64Prologue(MF);
-
-    bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
+  bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
   // FIXME: Emit FPO data for EH funclets.
   bool NeedsWinFPO = !IsFunclet && STI.isTargetWin32() &&
                      MF.getFunction().getParent()->getCodeViewFlag();

>From edb715e0bf299083397763bcebb275137d2b045f Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Mon, 27 Jan 2025 14:07:44 -0800
Subject: [PATCH 04/10] ASMPrinter and Mangler fixes.

---
 llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 4 ++--
 llvm/lib/IR/Mangler.cpp                    | 4 ++--
 llvm/lib/Target/X86/X86AsmPrinter.cpp      | 2 +-
 llvm/lib/Target/X86/X86Subtarget.h         | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 7bd3fb33b47d2..153a546a608a1 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -560,7 +560,7 @@ bool AsmPrinter::doInitialization(Module &M) {
 
   if (MAI->doesSupportDebugInformation()) {
     bool EmitCodeView = M.getCodeViewFlag();
-    if (EmitCodeView && TM.getTargetTriple().isOSWindows())
+    if (EmitCodeView && (TM.getTargetTriple().isOSWindows() || TM.getTargetTriple().isUEFI()))
       DebugHandlers.push_back(std::make_unique<CodeViewDebug>(this));
     if (!EmitCodeView || M.getDwarfVersion()) {
       if (hasDebugInfo()) {
@@ -4070,7 +4070,7 @@ const MCExpr *AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
 
 /// GetCPISymbol - Return the symbol for the specified constant pool entry.
 MCSymbol *AsmPrinter::GetCPISymbol(unsigned CPID) const {
-  if (getSubtargetInfo().getTargetTriple().isWindowsMSVCEnvironment()) {
+  if (getSubtargetInfo().getTargetTriple().isWindowsMSVCEnvironment() || getSubtargetInfo().getTargetTriple().isUEFI()) {
     const MachineConstantPoolEntry &CPE =
         MF->getConstantPool()->getConstants()[CPID];
     if (!CPE.isMachineConstantPoolEntry()) {
diff --git a/llvm/lib/IR/Mangler.cpp b/llvm/lib/IR/Mangler.cpp
index 3b9c00cf993f3..6c3efb7bd9a39 100644
--- a/llvm/lib/IR/Mangler.cpp
+++ b/llvm/lib/IR/Mangler.cpp
@@ -223,7 +223,7 @@ void llvm::emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
     bool NeedQuotes = GV->hasName() && !canBeUnquotedInDirective(GV->getName());
     if (NeedQuotes)
       OS << "\"";
-    if (TT.isWindowsGNUEnvironment() || TT.isWindowsCygwinEnvironment()) {
+    if (TT.isWindowsGNUEnvironment() || TT.isWindowsCygwinEnvironment() || TT.isUEFI()) {
       std::string Flag;
       raw_string_ostream FlagOS(Flag);
       Mangler.getNameWithPrefix(FlagOS, GV, false);
@@ -249,7 +249,7 @@ void llvm::emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
       OS << "\"";
 
     if (!GV->getValueType()->isFunctionTy()) {
-      if (TT.isWindowsMSVCEnvironment())
+      if (TT.isWindowsMSVCEnvironment() || TT.isUEFI())
         OS << ",DATA";
       else
         OS << ",data";
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index f01e47b41cf5e..0fbf2823922cf 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -985,7 +985,7 @@ static void emitNonLazyStubs(MachineModuleInfo *MMI, MCStreamer &OutStreamer) {
 /// selected floating-point instructions.
 static bool usesMSVCFloatingPoint(const Triple &TT, const Module &M) {
   // Only needed for MSVC
-  if (!TT.isWindowsMSVCEnvironment())
+  if (!TT.isWindowsMSVCEnvironment() || !TT.isUEFI())
     return false;
 
   for (const Function &F : M) {
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index b5a6545f7e466..8732b6273ff3c 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -325,7 +325,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
 
   bool isTargetUEFI64() const { return Is64Bit && isUEFI(); }
 
-  bool isTargetWin64() const { return Is64Bit && (isOSWindows() || isUEFI()); }
+  bool isTargetWin64() const { return Is64Bit && isOSWindows(); }
 
   bool isTargetWin32() const { return !Is64Bit && isOSWindows(); }
 

>From df957f3ee7c4609d79d793e3b9478a9fa5426bcf Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Mon, 27 Jan 2025 17:06:17 -0800
Subject: [PATCH 05/10] simplify isUEFI triple checks

---
 llvm/include/llvm/TargetParser/Triple.h    | 4 ++++
 llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h
index 844f11feef414..fecdfd745c23e 100644
--- a/llvm/include/llvm/TargetParser/Triple.h
+++ b/llvm/include/llvm/TargetParser/Triple.h
@@ -636,6 +636,10 @@ class Triple {
     return getOS() == Triple::Win32;
   }
 
+  bool isOSWindowsOrUEFI() const {
+    return isOSWindows() || isUEFI();
+  }
+
   /// Checks if the environment is MSVC.
   bool isKnownWindowsMSVCEnvironment() const {
     return isOSWindows() && getEnvironment() == Triple::MSVC;
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 153a546a608a1..fdfbcd687b339 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -560,7 +560,7 @@ bool AsmPrinter::doInitialization(Module &M) {
 
   if (MAI->doesSupportDebugInformation()) {
     bool EmitCodeView = M.getCodeViewFlag();
-    if (EmitCodeView && (TM.getTargetTriple().isOSWindows() || TM.getTargetTriple().isUEFI()))
+    if (EmitCodeView && TM.getTargetTriple().isOSWindowsOrUEFI())
       DebugHandlers.push_back(std::make_unique<CodeViewDebug>(this));
     if (!EmitCodeView || M.getDwarfVersion()) {
       if (hasDebugInfo()) {
@@ -4070,7 +4070,7 @@ const MCExpr *AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
 
 /// GetCPISymbol - Return the symbol for the specified constant pool entry.
 MCSymbol *AsmPrinter::GetCPISymbol(unsigned CPID) const {
-  if (getSubtargetInfo().getTargetTriple().isWindowsMSVCEnvironment() || getSubtargetInfo().getTargetTriple().isUEFI()) {
+  if (getSubtargetInfo().getTargetTriple().isWindowsMSVCEnvironment()) {
     const MachineConstantPoolEntry &CPE =
         MF->getConstantPool()->getConstants()[CPID];
     if (!CPE.isMachineConstantPoolEntry()) {

>From 08c5a794825a065d09df83c7ddfbe373e55726df Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Tue, 28 Jan 2025 09:09:56 -0800
Subject: [PATCH 06/10] Fix test failure

---
 llvm/include/llvm/TargetParser/Triple.h    | 5 ++---
 llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 3 ++-
 llvm/lib/IR/DataLayout.cpp                 | 2 +-
 llvm/lib/MC/MCContext.cpp                  | 2 +-
 llvm/lib/MC/TargetRegistry.cpp             | 3 +--
 llvm/lib/Target/X86/X86AsmPrinter.cpp      | 2 +-
 llvm/lib/Target/X86/X86ISelLowering.cpp    | 5 ++---
 llvm/lib/Target/X86/X86MCInstLower.cpp     | 2 +-
 llvm/lib/Target/X86/X86Subtarget.h         | 2 ++
 llvm/lib/Target/X86/X86TargetMachine.cpp   | 2 +-
 10 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/llvm/include/llvm/TargetParser/Triple.h b/llvm/include/llvm/TargetParser/Triple.h
index 24965bab78450..7d67966d17256 100644
--- a/llvm/include/llvm/TargetParser/Triple.h
+++ b/llvm/include/llvm/TargetParser/Triple.h
@@ -656,9 +656,8 @@ class Triple {
     return getOS() == Triple::Win32;
   }
 
-  bool isOSWindowsOrUEFI() const {
-    return isOSWindows() || isUEFI();
-  }
+  /// Tests whether the OS is Windows or UEFI.
+  bool isOSWindowsOrUEFI() const { return isOSWindows() || isUEFI(); }
 
   /// Checks if the environment is MSVC.
   bool isKnownWindowsMSVCEnvironment() const {
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index e77abf429e6b4..66d414c882a95 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -4091,7 +4091,8 @@ const MCExpr *AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
 
 /// GetCPISymbol - Return the symbol for the specified constant pool entry.
 MCSymbol *AsmPrinter::GetCPISymbol(unsigned CPID) const {
-  if (getSubtargetInfo().getTargetTriple().isWindowsMSVCEnvironment()) {
+  if (getSubtargetInfo().getTargetTriple().isWindowsMSVCEnvironment() ||
+      getSubtargetInfo().getTargetTriple().isUEFI()) {
     const MachineConstantPoolEntry &CPE =
         MF->getConstantPool()->getConstants()[CPID];
     if (!CPE.isMachineConstantPoolEntry()) {
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 95a5e5989ad00..0cf0bfc9702d3 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -178,7 +178,7 @@ const char *DataLayout::getManglingComponent(const Triple &T) {
     return "-m:l";
   if (T.isOSBinFormatMachO())
     return "-m:o";
-  if ((T.isOSWindows() || T.isUEFI()) && T.isOSBinFormatCOFF())
+  if (T.isOSWindowsOrUEFI() && T.isOSBinFormatCOFF())
     return T.getArch() == Triple::x86 ? "-m:x" : "-m:w";
   if (T.isOSBinFormatXCOFF())
     return "-m:a";
diff --git a/llvm/lib/MC/MCContext.cpp b/llvm/lib/MC/MCContext.cpp
index 46222fcaa5b15..335febde3687c 100644
--- a/llvm/lib/MC/MCContext.cpp
+++ b/llvm/lib/MC/MCContext.cpp
@@ -85,7 +85,7 @@ MCContext::MCContext(const Triple &TheTriple, const MCAsmInfo *mai,
     Env = IsMachO;
     break;
   case Triple::COFF:
-    if (!TheTriple.isOSWindows() && !TheTriple.isUEFI())
+    if (!TheTriple.isOSWindowsOrUEFI())
       report_fatal_error(
           "Cannot initialize MC for non-Windows COFF object files.");
 
diff --git a/llvm/lib/MC/TargetRegistry.cpp b/llvm/lib/MC/TargetRegistry.cpp
index e1879f97aa567..a9e33c8349bdc 100644
--- a/llvm/lib/MC/TargetRegistry.cpp
+++ b/llvm/lib/MC/TargetRegistry.cpp
@@ -31,8 +31,7 @@ MCStreamer *Target::createMCObjectStreamer(
   case Triple::UnknownObjectFormat:
     llvm_unreachable("Unknown object format");
   case Triple::COFF:
-    assert((T.isOSWindows() || T.isUEFI()) &&
-           "only Windows and UEFI COFF are supported");
+    assert(T.isOSWindowsOrUEFI() && "only Windows and UEFI COFF are supported");
     S = COFFStreamerCtorFn(Ctx, std::move(TAB), std::move(OW),
                            std::move(Emitter));
     break;
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 0fbf2823922cf..f01e47b41cf5e 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -985,7 +985,7 @@ static void emitNonLazyStubs(MachineModuleInfo *MMI, MCStreamer &OutStreamer) {
 /// selected floating-point instructions.
 static bool usesMSVCFloatingPoint(const Triple &TT, const Module &M) {
   // Only needed for MSVC
-  if (!TT.isWindowsMSVCEnvironment() || !TT.isUEFI())
+  if (!TT.isWindowsMSVCEnvironment())
     return false;
 
   for (const Function &F : M) {
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8ce6caf38e273..1760f64f85d1f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60999,7 +60999,7 @@ bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
 bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
 
   // No inline stack probe for Windows and UEFI, they have their own mechanism.
-  if (Subtarget.isOSWindows() || Subtarget.isUEFI() ||
+  if (Subtarget.isOSWindowsOrUEFI() ||
       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
     return false;
 
@@ -61025,8 +61025,7 @@ X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {
 
   // Generally, if we aren't on Windows or UEFI, the platform ABI does not
   // include support for stack probes, so don't emit them.
-  if (!(Subtarget.isOSWindows() || Subtarget.isUEFI()) ||
-      Subtarget.isTargetMachO() ||
+  if (!(Subtarget.isOSWindowsOrUEFI()) || Subtarget.isTargetMachO() ||
       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
     return "";
 
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index 7bae16c066716..645a9baeba65c 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1710,7 +1710,7 @@ static void printZeroExtend(const MachineInstr *MI, MCStreamer &OutStreamer,
 
 void X86AsmPrinter::EmitSEHInstruction(const MachineInstr *MI) {
   assert(MF->hasWinCFI() && "SEH_ instruction in function without WinCFI?");
-  assert((getSubtarget().isOSWindows() || TM.getTargetTriple().isUEFI()) &&
+  assert(getSubtarget().isOSWindowsOrUEFI() &&
          "SEH_ instruction Windows and UEFI only");
 
   // Use the .cv_fpo directives if we're emitting CodeView on 32-bit x86.
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index c479306a7f43c..61ce5f2e67eae 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -328,6 +328,8 @@ class X86Subtarget final : public X86GenSubtargetInfo {
 
   bool isUEFI() const { return TargetTriple.isUEFI(); }
 
+  bool isOSWindowsOrUEFI() const { return TargetTriple.isOSWindowsOrUEFI(); }
+
   bool isTargetUEFI64() const { return Is64Bit && isUEFI(); }
 
   bool isTargetWin64() const { return Is64Bit && isOSWindows(); }
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 64d170d259b50..b4b4d414de045 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -484,7 +484,7 @@ void X86PassConfig::addIRPasses() {
 
   // Add Control Flow Guard checks.
   const Triple &TT = TM->getTargetTriple();
-  if (TT.isOSWindows() || TT.isUEFI()) {
+  if (TT.isOSWindowsOrUEFI()) {
     if (TT.getArch() == Triple::x86_64) {
       addPass(createCFGuardDispatchPass());
     } else {

>From 5d801d520fcf3021d133ecf400fbc81d2d50a5df Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Mon, 10 Mar 2025 23:18:33 +0000
Subject: [PATCH 07/10] Skip SSE test

---
 llvm/test/CodeGen/X86/sse-regcall.ll | 1 -
 1 file changed, 1 deletion(-)

diff --git a/llvm/test/CodeGen/X86/sse-regcall.ll b/llvm/test/CodeGen/X86/sse-regcall.ll
index 871fdc1c0b9f2..03b9e123eea48 100644
--- a/llvm/test/CodeGen/X86/sse-regcall.ll
+++ b/llvm/test/CodeGen/X86/sse-regcall.ll
@@ -1,7 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i386-pc-win32 -mattr=+sse | FileCheck --check-prefix=WIN32 %s
 ; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse | FileCheck --check-prefix=WIN64 %s
-; RUN: llc < %s -mtriple=x86_64-unknown-uefi -mattr=+sse | FileCheck --check-prefix=WIN64 %s
 ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+sse | FileCheck --check-prefix=LINUXOSX %s
 
 ; Test regcall when receiving/returning i1

>From ec3732b77d8f0c87e1d246dbeca5e3960f381fc5 Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Tue, 18 Mar 2025 03:02:05 +0000
Subject: [PATCH 08/10] Clean up UEFI backend

---
 llvm/lib/IR/Mangler.cpp                  |   3 +-
 llvm/lib/Target/X86/X86CallingConv.td    | 220 ++++++++++++-----------
 llvm/lib/Target/X86/X86ISelLowering.cpp  |  20 +--
 llvm/lib/Target/X86/X86RegisterInfo.cpp  |   2 +-
 llvm/lib/Target/X86/X86Subtarget.h       |   7 +-
 llvm/lib/Target/X86/X86TargetMachine.cpp |   2 +-
 6 files changed, 129 insertions(+), 125 deletions(-)

diff --git a/llvm/lib/IR/Mangler.cpp b/llvm/lib/IR/Mangler.cpp
index 6c3efb7bd9a39..cba8ee8c707df 100644
--- a/llvm/lib/IR/Mangler.cpp
+++ b/llvm/lib/IR/Mangler.cpp
@@ -223,7 +223,8 @@ void llvm::emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
     bool NeedQuotes = GV->hasName() && !canBeUnquotedInDirective(GV->getName());
     if (NeedQuotes)
       OS << "\"";
-    if (TT.isWindowsGNUEnvironment() || TT.isWindowsCygwinEnvironment() || TT.isUEFI()) {
+    if (TT.isWindowsGNUEnvironment() || TT.isWindowsCygwinEnvironment() ||
+        TT.isUEFI()) {
       std::string Flag;
       raw_string_ostream FlagOS(Flag);
       Mangler.getNameWithPrefix(FlagOS, GV, false);
diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 8dcdc12c3efb8..80d26422d0cec 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -236,56 +236,59 @@ def RetCC_#NAME : CallingConv<[
 //===----------------------------------------------------------------------===//
 
 // Return-value conventions common to all X86 CC's.
-def RetCC_X86Common : CallingConv<[
-  // Scalar values are returned in AX first, then DX.  For i8, the ABI
-  // requires the values to be in AL and AH, however this code uses AL and DL
-  // instead. This is because using AH for the second register conflicts with
-  // the way LLVM does multiple return values -- a return of {i16,i8} would end
-  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
-  // for functions that return two i8 values are currently expected to pack the
-  // values into an i16 (which uses AX, and thus AL:AH).
-  //
-  // For code that doesn't care about the ABI, we allow returning more than two
-  // integer values in registers.
-  CCIfType<[v1i1],  CCPromoteToType<i8>>,
-  CCIfType<[i1],  CCPromoteToType<i8>>,
-  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
-  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
-  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
-  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,
-
-  // Boolean vectors of AVX-512 are returned in SIMD registers.
-  // The call from AVX to AVX-512 function should work,
-  // since the boolean types in AVX/AVX2 are promoted by default.
-  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
-  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
-  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
-  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
-  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
-  CCIfType<[v64i1], CCPromoteToType<v64i8>>,
-
-  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
-  // can only be used by ABI non-compliant code. If the target doesn't have XMM
-  // registers, it won't have vector types.
-  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
-            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
-
-  // 256-bit vectors are returned in YMM0 and XMM1, when they fit. YMM2 and YMM3
-  // can only be used by ABI non-compliant code. This vector type is only
-  // supported while using the AVX target feature.
-  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
-            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
-
-  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
-  // can only be used by ABI non-compliant code. This vector type is only
-  // supported while using the AVX-512 target feature.
-  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
-            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
-
-  // Long double types are always returned in FP0 (even with SSE),
-  // except on Win64.
-  CCIfNotSubtarget<"isTargetWin64()", CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>>
-]>;
+def RetCC_X86Common
+    : CallingConv<[
+          // Scalar values are returned in AX first, then DX.  For i8, the ABI
+          // requires the values to be in AL and AH, however this code uses AL
+          // and DL instead. This is because using AH for the second register
+          // conflicts with the way LLVM does multiple return values -- a return
+          // of {i16,i8} would end up in AX and AH, which overlap. Front-ends
+          // wishing to conform to the ABI for functions that return two i8
+          // values are currently expected to pack the values into an i16 (which
+          // uses AX, and thus AL:AH).
+          //
+          // For code that doesn't care about the ABI, we allow returning more
+          // than two integer values in registers.
+          CCIfType<[v1i1], CCPromoteToType<i8>>,
+          CCIfType<[i1], CCPromoteToType<i8>>,
+          CCIfType<[i8], CCAssignToReg<[AL, DL, CL]>>,
+          CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
+          CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
+          CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,
+
+          // Boolean vectors of AVX-512 are returned in SIMD registers.
+          // The call from AVX to AVX-512 function should work,
+          // since the boolean types in AVX/AVX2 are promoted by default.
+          CCIfType<[v2i1], CCPromoteToType<v2i64>>,
+          CCIfType<[v4i1], CCPromoteToType<v4i32>>,
+          CCIfType<[v8i1], CCPromoteToType<v8i16>>,
+          CCIfType<[v16i1], CCPromoteToType<v16i8>>,
+          CCIfType<[v32i1], CCPromoteToType<v32i8>>,
+          CCIfType<[v64i1], CCPromoteToType<v64i8>>,
+
+          // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2
+          // and XMM3 can only be used by ABI non-compliant code. If the target
+          // doesn't have XMM registers, it won't have vector types.
+          CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
+                   CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
+
+          // 256-bit vectors are returned in YMM0 and XMM1, when they fit. YMM2
+          // and YMM3 can only be used by ABI non-compliant code. This vector
+          // type is only supported while using the AVX target feature.
+          CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
+                   CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,
+
+          // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2
+          // and ZMM3 can only be used by ABI non-compliant code. This vector
+          // type is only supported while using the AVX-512 target feature.
+          CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32,
+                    v8f64],
+                   CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,
+
+          // Long double types are always returned in FP0 (even with SSE),
+          // except on Win64.
+          CCIfNotSubtarget<"isTargetWindowsOrUEFI64()",
+                           CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>>]>;
 
 // X86-32 C return-value convention.
 def RetCC_X86_32_C : CallingConv<[
@@ -466,49 +469,46 @@ def RetCC_X86_32 : CallingConv<[
 ]>;
 
 // This is the root return-value convention for the X86-64 backend.
-def RetCC_X86_64 : CallingConv<[
-  // HiPE uses RetCC_X86_64_HiPE
-  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,
+def RetCC_X86_64
+    : CallingConv<[
+          // HiPE uses RetCC_X86_64_HiPE
+          CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,
 
-  // Handle AnyReg calls.
-  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
+          // Handle AnyReg calls.
+          CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
 
-  // Handle Swift calls.
-  CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
-  CCIfCC<"CallingConv::SwiftTail", CCDelegateTo<RetCC_X86_64_Swift>>,
+          // Handle Swift calls.
+          CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
+          CCIfCC<"CallingConv::SwiftTail", CCDelegateTo<RetCC_X86_64_Swift>>,
 
-  // Handle explicit CC selection
-  CCIfCC<"CallingConv::Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
-  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
+          // Handle explicit CC selection
+          CCIfCC<"CallingConv::Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
+          CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
 
-  // Handle Vectorcall CC
-  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_64_Vectorcall>>,
-
-  CCIfCC<"CallingConv::X86_RegCall",
-    CCIfSubtarget<"isTargetWin64()", CCIfRegCallv4<CCDelegateTo<RetCC_X86_Win64_RegCallv4>>>>,
+          // Handle Vectorcall CC
+          CCIfCC<"CallingConv::X86_VectorCall",
+                 CCDelegateTo<RetCC_X86_64_Vectorcall>>,
 
-  CCIfCC<"CallingConv::X86_RegCall",
-    CCIfSubtarget<"isTargetUEFI64()", CCIfRegCallv4<CCDelegateTo<RetCC_X86_Win64_RegCallv4>>>>,
+          CCIfCC<"CallingConv::X86_RegCall",
+                 CCIfSubtarget<
+                     "isTargetWin64()",
+                     CCIfRegCallv4<CCDelegateTo<RetCC_X86_Win64_RegCallv4>>>>,
 
-  CCIfCC<"CallingConv::X86_RegCall",
-          CCIfSubtarget<"isTargetWin64()",                        
-                        CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
+          CCIfCC<"CallingConv::X86_RegCall",
+                 CCIfSubtarget<"isTargetWin64()",
+                               CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
 
-  CCIfCC<"CallingConv::X86_RegCall",
-        CCIfSubtarget<"isTargetUEFI64()",
-                      CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
+          CCIfCC<"CallingConv::X86_RegCall",
+                 CCDelegateTo<RetCC_X86_SysV64_RegCall>>,
 
-  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_SysV64_RegCall>>,
-          
-  // Mingw64 and native Win64 use Win64 CC
-  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
+          // Mingw64 and native Win64 use Win64 CC
+          CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
 
-  // UEFI64 uses Win64 CC
-  CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<RetCC_X86_Win64_C>>,
+          // UEFI64 uses Win64 CC
+          CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<RetCC_X86_Win64_C>>,
 
-  // Otherwise, drop to normal X86-64 CC
-  CCDelegateTo<RetCC_X86_64_C>
-]>;
+          // Otherwise, drop to normal X86-64 CC
+          CCDelegateTo<RetCC_X86_64_C>]>;
 
 // This is the return-value convention used for the entire X86 backend.
 let Entry = 1 in
@@ -1078,31 +1078,35 @@ def CC_X86_32 : CallingConv<[
 ]>;
 
 // This is the root argument convention for the X86-64 backend.
-def CC_X86_64 : CallingConv<[
-  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
-  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
-  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
-  CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>,
-  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
-  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
-  CCIfCC<"CallingConv::X86_RegCall",
-    CCIfSubtarget<"isTargetWin64()", CCIfRegCallv4<CCDelegateTo<CC_X86_Win64_RegCallv4>>>>,
-  CCIfCC<"CallingConv::X86_RegCall",
-    CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
-  CCIfCC<"CallingConv::X86_RegCall",
-    CCIfSubtarget<"isTargetUEFI64()", CCIfRegCallv4<CCDelegateTo<CC_X86_Win64_RegCallv4>>>>,
-  CCIfCC<"CallingConv::X86_RegCall",
-    CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
-  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_SysV64_RegCall>>,
-  CCIfCC<"CallingConv::PreserveNone", CCDelegateTo<CC_X86_64_Preserve_None>>,
-  CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
-
-  // Mingw64 and native Win64 use Win64 CC
-  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
-
-  // Otherwise, drop to normal X86-64 CC
-  CCDelegateTo<CC_X86_64_C>
-]>;
+def CC_X86_64
+    : CallingConv<
+          [CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
+           CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
+           CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
+           CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>,
+           CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
+           CCIfCC<"CallingConv::X86_VectorCall",
+                  CCDelegateTo<CC_X86_Win64_VectorCall>>,
+           CCIfCC<"CallingConv::X86_RegCall",
+                  CCIfSubtarget<
+                      "isTargetWin64()",
+                      CCIfRegCallv4<CCDelegateTo<CC_X86_Win64_RegCallv4>>>>,
+           CCIfCC<"CallingConv::X86_RegCall",
+                  CCIfSubtarget<"isTargetWin64()",
+                                CCDelegateTo<CC_X86_Win64_RegCall>>>,
+           CCIfCC<"CallingConv::X86_RegCall",
+                  CCDelegateTo<CC_X86_SysV64_RegCall>>,
+           CCIfCC<"CallingConv::PreserveNone",
+                  CCDelegateTo<CC_X86_64_Preserve_None>>,
+           CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
+
+           // Mingw64 and native Win64 use Win64 CC
+           CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
+           // UEFI uses Win64 CC
+           CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<CC_X86_Win64_C>>,
+
+           // Otherwise, drop to normal X86-64 CC
+           CCDelegateTo<CC_X86_64_C>]>;
 
 // This is the argument convention used for the entire X86 backend.
 let Entry = 1 in
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 48a1fd5193eb0..e51ad7f48645b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2596,7 +2596,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
   }
 
-  if (Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) {
+  if (Subtarget.isTargetWin64()) {
     setOperationAction(ISD::SDIV, MVT::i128, Custom);
     setOperationAction(ISD::UDIV, MVT::i128, Custom);
     setOperationAction(ISD::SREM, MVT::i128, Custom);
@@ -19861,8 +19861,7 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
   else if (isLegalConversion(SrcVT, true, Subtarget))
     return Op;
 
-  if ((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
-      SrcVT == MVT::i128)
+  if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
     return LowerWin64_INT128_TO_FP(Op, DAG);
 
   if (SDValue Extract = vectorizeExtractedCast(Op, dl, DAG, Subtarget))
@@ -20369,8 +20368,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
   if (DstVT.isVector())
     return lowerUINT_TO_FP_vec(Op, dl, DAG, Subtarget);
 
-  if ((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
-      SrcVT == MVT::i128)
+  if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
     return LowerWin64_INT128_TO_FP(Op, DAG);
 
   if (SDValue Extract = vectorizeExtractedCast(Op, dl, DAG, Subtarget))
@@ -29755,8 +29753,7 @@ static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget,
 }
 
 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
-  assert((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
-         "Unexpected target");
+  assert(Subtarget.isTargetWin64() && "Unexpected target");
   EVT VT = Op.getValueType();
   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
          "Unexpected return type for lowering");
@@ -29822,8 +29819,7 @@ SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) cons
 SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    SDValue &Chain) const {
-  assert((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
-         "Unexpected target");
+  assert(Subtarget.isTargetWin64() && "Unexpected target");
   EVT VT = Op.getValueType();
   bool IsStrict = Op->isStrictFPOpcode();
 
@@ -29856,8 +29852,7 @@ SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
 
 SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
                                                    SelectionDAG &DAG) const {
-  assert((Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64()) &&
-         "Unexpected target");
+  assert(Subtarget.isTargetWin64() && "Unexpected target");
   EVT VT = Op.getValueType();
   bool IsStrict = Op->isStrictFPOpcode();
 
@@ -34119,8 +34114,7 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
       return;
     }
 
-    if (VT == MVT::i128 &&
-        (Subtarget.isTargetWin64() || Subtarget.isTargetUEFI64())) {
+    if (VT == MVT::i128 && Subtarget.isTargetWin64()) {
       SDValue Chain;
       SDValue V = LowerWin64_FP_TO_INT128(SDValue(N, 0), DAG, Chain);
       Results.push_back(V);
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 5c655227d248a..9a2fcde7f766c 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -330,7 +330,7 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
   }
   case CallingConv::X86_RegCall:
     if (Is64Bit) {
-      if (IsWin64 || IsUEFI64) {
+      if (IsWin64) {
         return (HasSSE ? CSR_Win64_RegCall_SaveList :
                          CSR_Win64_RegCall_NoSSE_SaveList);
       } else {
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index b611319cc3226..e68420e4a3cb3 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -333,6 +333,10 @@ class X86Subtarget final : public X86GenSubtargetInfo {
 
   bool isOSWindowsOrUEFI() const { return TargetTriple.isOSWindowsOrUEFI(); }
 
+  bool isTargetWindowsOrUEFI64() const {
+    return isTargetWin64() || isTargetUEFI64();
+  }
+
   bool isTargetUEFI64() const { return Is64Bit && isUEFI(); }
 
   bool isTargetWin64() const { return Is64Bit && isOSWindows(); }
@@ -353,6 +357,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
     // On Win64 and UEFI64, all these conventions just use the default
     // convention.
     case CallingConv::C:
+      return isTargetWin64() || isTargetUEFI64();
     case CallingConv::Fast:
     case CallingConv::Tail:
     case CallingConv::Swift:
@@ -362,7 +367,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
     case CallingConv::X86_ThisCall:
     case CallingConv::X86_VectorCall:
     case CallingConv::Intel_OCL_BI:
-      return isTargetWin64() || isTargetUEFI64();
+      return isTargetWin64();
     // This convention allows using the Win64 convention on other targets.
     case CallingConv::Win64:
       return true;
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 20ecd511f8dcf..4cecbbf27aa30 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -484,7 +484,7 @@ void X86PassConfig::addIRPasses() {
 
   // Add Control Flow Guard checks.
   const Triple &TT = TM->getTargetTriple();
-  if (TT.isOSWindowsOrUEFI()) {
+  if (TT.isOSWindows()) {
     if (TT.getArch() == Triple::x86_64) {
       addPass(createCFGuardDispatchPass());
     } else {

>From c27af6d27149935b662b04bc0a40bf199758c241 Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Tue, 18 Mar 2025 03:39:56 +0000
Subject: [PATCH 09/10] Fix Tablegen file

---
 llvm/lib/Target/X86/X86CallingConv.td | 213 ++++++++++++--------------
 1 file changed, 100 insertions(+), 113 deletions(-)

diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 80d26422d0cec..14092278295be 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -236,59 +236,56 @@ def RetCC_#NAME : CallingConv<[
 //===----------------------------------------------------------------------===//
 
 // Return-value conventions common to all X86 CC's.
-def RetCC_X86Common
-    : CallingConv<[
-          // Scalar values are returned in AX first, then DX.  For i8, the ABI
-          // requires the values to be in AL and AH, however this code uses AL
-          // and DL instead. This is because using AH for the second register
-          // conflicts with the way LLVM does multiple return values -- a return
-          // of {i16,i8} would end up in AX and AH, which overlap. Front-ends
-          // wishing to conform to the ABI for functions that return two i8
-          // values are currently expected to pack the values into an i16 (which
-          // uses AX, and thus AL:AH).
-          //
-          // For code that doesn't care about the ABI, we allow returning more
-          // than two integer values in registers.
-          CCIfType<[v1i1], CCPromoteToType<i8>>,
-          CCIfType<[i1], CCPromoteToType<i8>>,
-          CCIfType<[i8], CCAssignToReg<[AL, DL, CL]>>,
-          CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
-          CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
-          CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,
-
-          // Boolean vectors of AVX-512 are returned in SIMD registers.
-          // The call from AVX to AVX-512 function should work,
-          // since the boolean types in AVX/AVX2 are promoted by default.
-          CCIfType<[v2i1], CCPromoteToType<v2i64>>,
-          CCIfType<[v4i1], CCPromoteToType<v4i32>>,
-          CCIfType<[v8i1], CCPromoteToType<v8i16>>,
-          CCIfType<[v16i1], CCPromoteToType<v16i8>>,
-          CCIfType<[v32i1], CCPromoteToType<v32i8>>,
-          CCIfType<[v64i1], CCPromoteToType<v64i8>>,
-
-          // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2
-          // and XMM3 can only be used by ABI non-compliant code. If the target
-          // doesn't have XMM registers, it won't have vector types.
-          CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
-                   CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
-
-          // 256-bit vectors are returned in YMM0 and XMM1, when they fit. YMM2
-          // and YMM3 can only be used by ABI non-compliant code. This vector
-          // type is only supported while using the AVX target feature.
-          CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
-                   CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,
-
-          // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2
-          // and ZMM3 can only be used by ABI non-compliant code. This vector
-          // type is only supported while using the AVX-512 target feature.
-          CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32,
-                    v8f64],
-                   CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,
-
-          // Long double types are always returned in FP0 (even with SSE),
-          // except on Win64.
-          CCIfNotSubtarget<"isTargetWindowsOrUEFI64()",
-                           CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>>]>;
+def RetCC_X86Common : CallingConv<[
+  // Scalar values are returned in AX first, then DX.  For i8, the ABI
+  // requires the values to be in AL and AH, however this code uses AL and DL
+  // instead. This is because using AH for the second register conflicts with
+  // the way LLVM does multiple return values -- a return of {i16,i8} would end
+  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
+  // for functions that return two i8 values are currently expected to pack the
+  // values into an i16 (which uses AX, and thus AL:AH).
+  //
+  // For code that doesn't care about the ABI, we allow returning more than two
+  // integer values in registers.
+  CCIfType<[v1i1],  CCPromoteToType<i8>>,
+  CCIfType<[i1],  CCPromoteToType<i8>>,
+  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
+  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
+  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
+  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,
+
+  // Boolean vectors of AVX-512 are returned in SIMD registers.
+  // The call from AVX to AVX-512 function should work,
+  // since the boolean types in AVX/AVX2 are promoted by default.
+  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
+  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
+  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
+  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
+  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
+  CCIfType<[v64i1], CCPromoteToType<v64i8>>,
+
+  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
+  // can only be used by ABI non-compliant code. If the target doesn't have XMM
+  // registers, it won't have vector types.
+  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
+            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
+
+  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
+  // can only be used by ABI non-compliant code. This vector type is only
+  // supported while using the AVX target feature.
+  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
+            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
+
+  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
+  // can only be used by ABI non-compliant code. This vector type is only
+  // supported while using the AVX-512 target feature.
+  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
+            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
+
+  // Long double types are always returned in FP0 (even with SSE),
+  // except on Win64.
+  CCIfNotSubtarget<"isTargetWindowsOrUEFI64()", CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>>
+]>;
 
 // X86-32 C return-value convention.
 def RetCC_X86_32_C : CallingConv<[
@@ -469,46 +466,42 @@ def RetCC_X86_32 : CallingConv<[
 ]>;
 
 // This is the root return-value convention for the X86-64 backend.
-def RetCC_X86_64
-    : CallingConv<[
-          // HiPE uses RetCC_X86_64_HiPE
-          CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,
+def RetCC_X86_64 : CallingConv<[
+  // HiPE uses RetCC_X86_64_HiPE
+  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,
 
-          // Handle AnyReg calls.
-          CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
+  // Handle AnyReg calls.
+  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
 
-          // Handle Swift calls.
-          CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
-          CCIfCC<"CallingConv::SwiftTail", CCDelegateTo<RetCC_X86_64_Swift>>,
+  // Handle Swift calls.
+  CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
+  CCIfCC<"CallingConv::SwiftTail", CCDelegateTo<RetCC_X86_64_Swift>>,
 
-          // Handle explicit CC selection
-          CCIfCC<"CallingConv::Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
-          CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
+  // Handle explicit CC selection
+  CCIfCC<"CallingConv::Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
+  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
 
-          // Handle Vectorcall CC
-          CCIfCC<"CallingConv::X86_VectorCall",
-                 CCDelegateTo<RetCC_X86_64_Vectorcall>>,
+  // Handle Vectorcall CC
+  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_64_Vectorcall>>,
 
-          CCIfCC<"CallingConv::X86_RegCall",
-                 CCIfSubtarget<
-                     "isTargetWin64()",
-                     CCIfRegCallv4<CCDelegateTo<RetCC_X86_Win64_RegCallv4>>>>,
-
-          CCIfCC<"CallingConv::X86_RegCall",
-                 CCIfSubtarget<"isTargetWin64()",
-                               CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
+  CCIfCC<"CallingConv::X86_RegCall",
+    CCIfSubtarget<"isTargetWin64()", CCIfRegCallv4<CCDelegateTo<RetCC_X86_Win64_RegCallv4>>>>,
 
-          CCIfCC<"CallingConv::X86_RegCall",
-                 CCDelegateTo<RetCC_X86_SysV64_RegCall>>,
+  CCIfCC<"CallingConv::X86_RegCall",
+          CCIfSubtarget<"isTargetWin64()",                        
+                        CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
 
-          // Mingw64 and native Win64 use Win64 CC
-          CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
+  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_SysV64_RegCall>>,
+          
+  // Mingw64 and native Win64 use Win64 CC
+  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
 
-          // UEFI64 uses Win64 CC
-          CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<RetCC_X86_Win64_C>>,
+  // UEFI64 uses Win64 CC
+  CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<RetCC_X86_Win64_C>>,
 
-          // Otherwise, drop to normal X86-64 CC
-          CCDelegateTo<RetCC_X86_64_C>]>;
+  // Otherwise, drop to normal X86-64 CC
+  CCDelegateTo<RetCC_X86_64_C>
+]>;
 
 // This is the return-value convention used for the entire X86 backend.
 let Entry = 1 in
@@ -1078,35 +1071,29 @@ def CC_X86_32 : CallingConv<[
 ]>;
 
 // This is the root argument convention for the X86-64 backend.
-def CC_X86_64
-    : CallingConv<
-          [CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
-           CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
-           CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
-           CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>,
-           CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
-           CCIfCC<"CallingConv::X86_VectorCall",
-                  CCDelegateTo<CC_X86_Win64_VectorCall>>,
-           CCIfCC<"CallingConv::X86_RegCall",
-                  CCIfSubtarget<
-                      "isTargetWin64()",
-                      CCIfRegCallv4<CCDelegateTo<CC_X86_Win64_RegCallv4>>>>,
-           CCIfCC<"CallingConv::X86_RegCall",
-                  CCIfSubtarget<"isTargetWin64()",
-                                CCDelegateTo<CC_X86_Win64_RegCall>>>,
-           CCIfCC<"CallingConv::X86_RegCall",
-                  CCDelegateTo<CC_X86_SysV64_RegCall>>,
-           CCIfCC<"CallingConv::PreserveNone",
-                  CCDelegateTo<CC_X86_64_Preserve_None>>,
-           CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
-
-           // Mingw64 and native Win64 use Win64 CC
-           CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
-           // UEFI uses Win64 CC
-           CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<CC_X86_Win64_C>>,
-
-           // Otherwise, drop to normal X86-64 CC
-           CCDelegateTo<CC_X86_64_C>]>;
+def CC_X86_64 : CallingConv<[
+  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
+  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
+  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
+  CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>,
+  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
+  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
+  CCIfCC<"CallingConv::X86_RegCall",
+    CCIfSubtarget<"isTargetWin64()", CCIfRegCallv4<CCDelegateTo<CC_X86_Win64_RegCallv4>>>>,
+  CCIfCC<"CallingConv::X86_RegCall",
+    CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
+  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_SysV64_RegCall>>,
+  CCIfCC<"CallingConv::PreserveNone", CCDelegateTo<CC_X86_64_Preserve_None>>,
+  CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
+
+  // Mingw64 and native Win64 use Win64 CC
+  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
+  // UEFI uses Win64 CC
+  CCIfSubtarget<"isTargetUEFI64()", CCDelegateTo<CC_X86_Win64_C>>,
+
+  // Otherwise, drop to normal X86-64 CC
+  CCDelegateTo<CC_X86_64_C>
+]>;
 
 // This is the argument convention used for the entire X86 backend.
 let Entry = 1 in

>From d2f1a5d3c5d870523b2e0984c4eede32c80f94a8 Mon Sep 17 00:00:00 2001
From: prabhukr <prabhukr at google.com>
Date: Tue, 18 Mar 2025 22:49:19 +0000
Subject: [PATCH 10/10] Format td file.

---
 llvm/lib/Target/X86/X86CallingConv.td | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 14092278295be..82f6dfd5a3b07 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -488,9 +488,8 @@ def RetCC_X86_64 : CallingConv<[
     CCIfSubtarget<"isTargetWin64()", CCIfRegCallv4<CCDelegateTo<RetCC_X86_Win64_RegCallv4>>>>,
 
   CCIfCC<"CallingConv::X86_RegCall",
-          CCIfSubtarget<"isTargetWin64()",                        
+          CCIfSubtarget<"isTargetWin64()",
                         CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
-
   CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_SysV64_RegCall>>,
           
   // Mingw64 and native Win64 use Win64 CC



More information about the llvm-commits mailing list