[llvm] [win][x64] Guarantee shape of tail call to a control flow guard function (PR #174108)

Daniel Paoliello via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 31 11:06:20 PST 2025


https://github.com/dpaoliello created https://github.com/llvm/llvm-project/pull/174108

For the purposes of Import Call Optimization, a tail call to a Control Flow Guard function must always be lowered as `TAILJMPm64_REX`. This is useful in general, as we know that the call will be handled via a fixup rather than needing scratch registers for base+index.

This change introduces a new pseudo-instruction, `TCRETURN_CFG`, specifically for the purpose of noting that there is a "tail-call return" to a Control Flow Guard function, which is then always lowered to `TAILJMPm64_REX` instead of relying on pattern matching.

>From 3abe78ae6456c01f5168a5d08b5b7bfb2b47c02a Mon Sep 17 00:00:00 2001
From: Daniel Paoliello <danpao at microsoft.com>
Date: Tue, 23 Dec 2025 10:00:41 -0800
Subject: [PATCH] [win][x64] Guarantee shape of tail call to a control flow
 guard function

---
 llvm/include/llvm/Transforms/CFGuard.h      |  2 ++
 llvm/lib/Target/X86/X86AsmPrinter.cpp       |  3 ++-
 llvm/lib/Target/X86/X86ExpandPseudo.cpp     | 13 ++++++++++++-
 llvm/lib/Target/X86/X86FrameLowering.cpp    |  3 ++-
 llvm/lib/Target/X86/X86ISelLowering.cpp     |  1 +
 llvm/lib/Target/X86/X86ISelLowering.h       |  3 +++
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 17 ++++++++++++++++-
 llvm/lib/Target/X86/X86InstrControl.td      |  5 +++++
 llvm/lib/Target/X86/X86InstrFragments.td    |  3 +++
 llvm/lib/Target/X86/X86RegisterInfo.cpp     |  1 +
 llvm/lib/Transforms/CFGuard/CFGuard.cpp     |  5 +++++
 llvm/test/CodeGen/X86/cfguard-checks.ll     |  3 +--
 12 files changed, 53 insertions(+), 6 deletions(-)

diff --git a/llvm/include/llvm/Transforms/CFGuard.h b/llvm/include/llvm/Transforms/CFGuard.h
index b81db8f487965..62e1195c8222e 100644
--- a/llvm/include/llvm/Transforms/CFGuard.h
+++ b/llvm/include/llvm/Transforms/CFGuard.h
@@ -15,6 +15,7 @@
 
 namespace llvm {
 
+class CallBase;
 class FunctionPass;
 class GlobalValue;
 
@@ -35,6 +36,7 @@ FunctionPass *createCFGuardCheckPass();
 /// Insert Control FLow Guard dispatches on indirect function calls.
 FunctionPass *createCFGuardDispatchPass();
 
+bool isCFGuardCall(const CallBase *CB);
 bool isCFGuardFunction(const GlobalValue *GV);
 
 } // namespace llvm
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 84b921222a116..1394ccb7b5c90 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -480,7 +480,8 @@ static bool isIndirectBranchOrTailCall(const MachineInstr &MI) {
          Opc == X86::TCRETURN_HIPE32ri || Opc == X86::TCRETURNmi ||
          Opc == X86::TCRETURN_WINmi64 || Opc == X86::TCRETURNri64 ||
          Opc == X86::TCRETURNmi64 || Opc == X86::TCRETURNri64_ImpCall ||
-         Opc == X86::TAILJMPr64_REX || Opc == X86::TAILJMPm64_REX;
+         Opc == X86::TAILJMPr64_REX || Opc == X86::TAILJMPm64_REX ||
+         Opc == X86::TCRETURN_CFG;
 }
 
 void X86AsmPrinter::emitBasicBlockEnd(const MachineBasicBlock &MBB) {
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index 6a18086cae29f..1017fa045bc2e 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -277,7 +277,8 @@ bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB,
   case X86::TCRETURNri64:
   case X86::TCRETURNri64_ImpCall:
   case X86::TCRETURNmi64:
-  case X86::TCRETURN_WINmi64: {
+  case X86::TCRETURN_WINmi64:
+  case X86::TCRETURN_CFG: {
     bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64 ||
                  Opcode == X86::TCRETURN_WINmi64;
     MachineOperand &JumpTarget = MBBI->getOperand(0);
@@ -358,6 +359,16 @@ bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB,
       BuildMI(MBB, MBBI, DL,
               TII->get(IsX64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
           .add(JumpTarget);
+    } else if (Opcode == X86::TCRETURN_CFG) {
+      // Tail call to a Control Flow Guard func is lowered as a jump to the
+      // function via RIP.
+      BuildMI(MBB, MBBI, DL, TII->get(X86::TAILJMPm64_REX))
+          .addReg(X86::RIP)
+          .addImm(1)
+          .addReg(0)
+          .addGlobalAddress(JumpTarget.getGlobal(), 0,
+                            JumpTarget.getTargetFlags())
+          .addReg(0);
     } else {
       assert(!IsX64 && "Win64 and UEFI64 require REX for indirect jumps.");
       JumpTarget.setIsKill();
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index 8bca6344d6521..7d2858260842f 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -2401,7 +2401,8 @@ static bool isTailCallOpcode(unsigned Opc) {
          Opc == X86::TCRETURN_HIPE32ri || Opc == X86::TCRETURNdi ||
          Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||
          Opc == X86::TCRETURNri64_ImpCall || Opc == X86::TCRETURNdi64 ||
-         Opc == X86::TCRETURNmi64 || Opc == X86::TCRETURN_WINmi64;
+         Opc == X86::TCRETURNmi64 || Opc == X86::TCRETURN_WINmi64 ||
+         Opc == X86::TCRETURN_CFG;
 }
 
 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3f1db1a500e05..abdcaadfbc92f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -35657,6 +35657,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(CVTTP2UIS)
   NODE_NAME_CASE(MCVTTP2UIS)
   NODE_NAME_CASE(POP_FROM_X87_REG)
+  NODE_NAME_CASE(CFG_TC_RETURN)
   }
   return nullptr;
 #undef NODE_NAME_CASE
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index a528c311975d8..90b4d3abfde3e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -336,6 +336,9 @@ namespace llvm {
     /// the list of operands.
     TC_RETURN,
 
+    // Special case for tail call return for Control Flow Guard function calls.
+    CFG_TC_RETURN,
+
     // Vector move to low scalar and zero higher vector elements.
     VZEXT_MOVL,
 
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index ae9d0a162011f..69389c095bf74 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -27,6 +27,7 @@
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/Module.h"
+#include "llvm/Transforms/CFGuard.h"
 
 #define DEBUG_TYPE "x86-isel"
 
@@ -2431,6 +2432,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   }
 
   bool IsImpCall = false;
+  bool IsCFGuardTailCall = false;
   if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
     assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
     // In the 64-bit large code model, we have to make all calls
@@ -2448,6 +2450,18 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
              Callee.getValueType() == MVT::i32) {
     // Zero-extend the 32-bit Callee address into a 64-bit according to x32 ABI
     Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
+  } else if (isTailCall && isCFGuardCall(CB)) {
+    // We'll use a specific pseudo instruction for tail calls to control flow
+    // guard functions to guarantee the instruction used for the call. To do
+    // this we need to unwrap the load now and use the CFG Func GV as the
+    // callee.
+    IsCFGuardTailCall = true;
+    auto LoadNode = cast<LoadSDNode>(Callee);
+    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(unwrapAddress(LoadNode->getBasePtr()));
+    assert(isCFGuardFunction(GA->getGlobal()) && "CFG Call should be to a guard function");
+    assert(LoadNode->getOffset()->isUndef() && "CFG Function load should not have an offset");
+    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
+                                     0, X86II::MO_NO_FLAG);
   }
 
   SmallVector<SDValue, 8> Ops;
@@ -2552,7 +2566,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
     // should be computed from returns not tail calls.  Consider a void
     // function making a tail call to a function returning int.
     MF.getFrameInfo().setHasTailCall();
-    SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, MVT::Other, Ops);
+    auto Opcode = IsCFGuardTailCall ? X86ISD::CFG_TC_RETURN : X86ISD::TC_RETURN;
+    SDValue Ret = DAG.getNode(Opcode, dl, MVT::Other, Ops);
 
     if (IsCFICall)
       Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
diff --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td
index e8527cd73abb5..b24965ca610a1 100644
--- a/llvm/lib/Target/X86/X86InstrControl.td
+++ b/llvm/lib/Target/X86/X86InstrControl.td
@@ -376,6 +376,11 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
                                (ins i64mem_w64TC:$dst, i32imm:$offset),
                                []>, Sched<[WriteJumpLd]>;
 
+  let isPseudo = 1, mayLoad = 1 in
+  def TCRETURN_CFG   : PseudoI<(outs),
+                               (ins i64imm:$dst, i32imm:$offset),
+                               [(X86cfgtcret tglobaladdr:$dst, timm:$offset)]>, Sched<[WriteJumpLd]>;
+
   def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_brtarget:$dst),
                            []>, Sched<[WriteJump]>;
 
diff --git a/llvm/lib/Target/X86/X86InstrFragments.td b/llvm/lib/Target/X86/X86InstrFragments.td
index 116986a0fffea..42e7a7ebe198b 100644
--- a/llvm/lib/Target/X86/X86InstrFragments.td
+++ b/llvm/lib/Target/X86/X86InstrFragments.td
@@ -259,6 +259,9 @@ def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
 def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
                         [SDNPHasChain,  SDNPOptInGlue, SDNPVariadic]>;
 
+def X86cfgtcret  : SDNode<"X86ISD::CFG_TC_RETURN",     SDT_X86TCRET,
+                        [SDNPHasChain,  SDNPOptInGlue, SDNPVariadic]>;
+
 def X86add_flag  : SDNode<"X86ISD::ADD",  SDTBinaryArithWithFlags,
                           [SDNPCommutative]>;
 def X86sub_flag  : SDNode<"X86ISD::SUB",  SDTBinaryArithWithFlags>;
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 72f38133e21ff..3052b09b01cc5 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -986,6 +986,7 @@ unsigned X86RegisterInfo::findDeadCallerSavedReg(
   case X86::TCRETURNri64_ImpCall:
   case X86::TCRETURNmi64:
   case X86::TCRETURN_WINmi64:
+  case X86::TCRETURN_CFG:
   case X86::EH_RETURN:
   case X86::EH_RETURN64: {
     LiveRegUnits LRU(*this);
diff --git a/llvm/lib/Transforms/CFGuard/CFGuard.cpp b/llvm/lib/Transforms/CFGuard/CFGuard.cpp
index 46456706d46a1..a3420d6407c2e 100644
--- a/llvm/lib/Transforms/CFGuard/CFGuard.cpp
+++ b/llvm/lib/Transforms/CFGuard/CFGuard.cpp
@@ -313,6 +313,11 @@ FunctionPass *llvm::createCFGuardDispatchPass() {
   return new CFGuard(CFGuardPass::Mechanism::Dispatch);
 }
 
+bool llvm::isCFGuardCall(const CallBase *CB) {
+  return CB->getCallingConv() == CallingConv::CFGuard_Check ||
+    CB->countOperandBundlesOfType(LLVMContext::OB_cfguardtarget);
+}
+
 bool llvm::isCFGuardFunction(const GlobalValue *GV) {
   if (GV->getLinkage() != GlobalValue::ExternalLinkage)
     return false;
diff --git a/llvm/test/CodeGen/X86/cfguard-checks.ll b/llvm/test/CodeGen/X86/cfguard-checks.ll
index 3a2de718e8a1b..c629ecb8c7261 100644
--- a/llvm/test/CodeGen/X86/cfguard-checks.ll
+++ b/llvm/test/CodeGen/X86/cfguard-checks.ll
@@ -213,8 +213,7 @@ entry:
   ; X64-LABEL: vmptr_thunk:
   ; X64:            movq (%rcx), %rax
   ; X64-NEXT:       movq 8(%rax), %rax
-  ; X64-NEXT:       movq __guard_dispatch_icall_fptr(%rip), %rdx
-  ; X64-NEXT:       rex64 jmpq *%rdx            # TAILCALL
+  ; X64-NEXT:       rex64 jmpq      *__guard_dispatch_icall_fptr(%rip)            # TAILCALL
   ; X64-NOT:   callq
 }
 



More information about the llvm-commits mailing list