[llvm] [x86_64][windows][swift] do not use Swift async extended frame for wi… (PR #80468)

Alex Lorenz via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 2 09:50:21 PST 2024


https://github.com/hyp updated https://github.com/llvm/llvm-project/pull/80468

>From 9675cef28889e263915df6d33665f601a54b81a9 Mon Sep 17 00:00:00 2001
From: Alex Lorenz <arphaman at gmail.com>
Date: Thu, 1 Feb 2024 11:54:26 -0800
Subject: [PATCH] [x86_64][windows][swift] do not use Swift async extended
 frame for windows x86_64 targets that use windows 64 prologue

Windows x86_64 stack frame layout is currently not compatible with Swift's async extended
frame, which reserves the slot right below RBP (RBP-8) for the async context
pointer, as it doesn't account for the fact that a stack object in a win64 frame can
be allocated at the same location. This can cause issues at runtime, for instance, Swift's
TCA test code has functions that fail because of this issue, as they spill a value to that
stack slot, which then gets overwritten by a store into the address returned by the
@llvm.swift.async.context.addr() intrinsic (that ends up being RBP - 8), leading to an
incorrect value being used at a later point when that stack slot is being read from again.
This change drops the use of async extended frame for windows x86_64 subtargets and
instead uses the x32 based approach of allocating a separate stack slot for the stored
async context pointer.

Additionally, LLDB, which is the primary consumer of the extended frame, makes assumptions
like checking for a saved previous frame pointer at the current frame pointer address,
which is also incompatible with the windows x86_64 frame layout, as the previous frame
pointer is not guaranteed to be stored at the current frame pointer address. Therefore
the extended frame layout can be turned off to fix the current miscompile without
introducing regression into LLDB for windows x86_64 as it already doesn't work correctly.
I am still investigating what should be done for LLDB to support using an allocated
stack slot to store the async frame context instead of being located at RBP - 8 for
windows.
---
 llvm/lib/Target/X86/X86FrameLowering.cpp    |  5 ++
 llvm/lib/Target/X86/X86ISelLowering.cpp     | 21 +++++--
 llvm/lib/Target/X86/X86ISelLowering.h       |  5 ++
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 13 +++--
 llvm/test/CodeGen/X86/swift-async-win64.ll  | 61 +++++++++++++--------
 5 files changed, 71 insertions(+), 34 deletions(-)

diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index fc2d4fba9673b..be416fb0db069 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1605,6 +1605,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
       [[fallthrough]];
 
     case SwiftAsyncFramePointerMode::Always:
+      assert(
+          !IsWin64Prologue &&
+          "win64 prologue does not set the bit 60 in the saved frame pointer");
       BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8), MachineFramePtr)
           .addUse(MachineFramePtr)
           .addImm(60)
@@ -1747,6 +1750,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
 
     if (!IsFunclet) {
       if (X86FI->hasSwiftAsyncContext()) {
+        assert(!IsWin64Prologue &&
+               "win64 prologue does not store async context right below rbp");
         const auto &Attrs = MF.getFunction().getAttributes();
 
         // Before we update the live frame pointer we have to ensure there's a
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 531e00862558c..091eebdf7ab9b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26572,6 +26572,15 @@ static SDValue EmitMaskedTruncSStore(bool SignedSat, SDValue Chain,
   return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
 }
 
+bool X86::isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget,
+                                             const MachineFunction &MF) {
+  if (!Subtarget.is64Bit())
+    return false;
+  // 64-bit targets support extended Swift async frame setup,
+  // except for targets that use the windows 64 prologue.
+  return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+}
+
 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
   unsigned IntNo = Op.getConstantOperandVal(1);
@@ -26583,7 +26592,7 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
       SDLoc dl(Op);
       auto &MF = DAG.getMachineFunction();
       auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
-      if (Subtarget.is64Bit()) {
+      if (X86::isExtendedSwiftAsyncFrameSupported(Subtarget, MF)) {
         MF.getFrameInfo().setFrameAddressIsTaken(true);
         X86FI->setHasSwiftAsyncContext(true);
         SDValue Chain = Op->getOperand(0);
@@ -26596,13 +26605,15 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
                            CopyRBP.getValue(1));
       } else {
-        // 32-bit so no special extended frame, create or reuse an existing
-        // stack slot.
+        // No special extended frame, create or reuse an existing stack slot.
+        int PtrSize = Subtarget.is64Bit() ? 8 : 4;
         if (!X86FI->getSwiftAsyncContextFrameIdx())
           X86FI->setSwiftAsyncContextFrameIdx(
-              MF.getFrameInfo().CreateStackObject(4, Align(4), false));
+              MF.getFrameInfo().CreateStackObject(PtrSize, Align(PtrSize),
+                                                  false));
         SDValue Result =
-            DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(), MVT::i32);
+            DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(),
+                              PtrSize == 8 ? MVT::i64 : MVT::i32);
         // Return { result, chain }.
         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
                            Op->getOperand(0));
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 32745400a38b7..f93c54781846b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -966,6 +966,11 @@ namespace llvm {
     /// Check if Op is an operation that could be folded into a zero extend x86
     /// instruction.
     bool mayFoldIntoZeroExtend(SDValue Op);
+
+    /// True if the target supports the extended frame for async Swift
+    /// functions.
+    bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget,
+                                            const MachineFunction &MF);
   } // end namespace X86
 
   //===--------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index d75bd4171fde9..8c9bc759d6c0a 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -1813,14 +1813,17 @@ SDValue X86TargetLowering::LowerFormalArguments(
   for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
     if (Ins[I].Flags.isSwiftAsync()) {
       auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
-      if (Subtarget.is64Bit())
+      if (X86::isExtendedSwiftAsyncFrameSupported(Subtarget, MF))
         X86FI->setHasSwiftAsyncContext(true);
       else {
-        int FI = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
+        int PtrSize = Subtarget.is64Bit() ? 8 : 4;
+        int FI =
+            MF.getFrameInfo().CreateStackObject(PtrSize, Align(PtrSize), false);
         X86FI->setSwiftAsyncContextFrameIdx(FI);
-        SDValue St = DAG.getStore(DAG.getEntryNode(), dl, InVals[I],
-                                  DAG.getFrameIndex(FI, MVT::i32),
-                                  MachinePointerInfo::getFixedStack(MF, FI));
+        SDValue St = DAG.getStore(
+            DAG.getEntryNode(), dl, InVals[I],
+            DAG.getFrameIndex(FI, PtrSize == 8 ? MVT::i64 : MVT::i32),
+            MachinePointerInfo::getFixedStack(MF, FI));
         Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, St, Chain);
       }
     }
diff --git a/llvm/test/CodeGen/X86/swift-async-win64.ll b/llvm/test/CodeGen/X86/swift-async-win64.ll
index 8f30eb62d4b9c..843118ba43558 100644
--- a/llvm/test/CodeGen/X86/swift-async-win64.ll
+++ b/llvm/test/CodeGen/X86/swift-async-win64.ll
@@ -6,14 +6,12 @@ define void @simple(ptr swiftasync %context) "frame-pointer"="all" {
 }
 
 ; CHECK64-LABEL: simple:
-; CHECK64: btsq    $60, %rbp
 ; CHECK64: pushq   %rbp
-; CHECK64: pushq   %r14
-; CHECK64: leaq    8(%rsp), %rbp
-; [...]
-; CHECK64: addq    $16, %rsp
+; CHECK64: pushq   %rax
+; CHECK64: movq    %rsp, %rbp
+; CHECK64: movq    %r14, (%rbp)
+; CHECK64: addq    $8, %rsp
 ; CHECK64: popq    %rbp
-; CHECK64: btrq    $60, %rbp
 ; CHECK64: retq
 
 ; CHECK32-LABEL: simple:
@@ -26,20 +24,20 @@ define void @more_csrs(ptr swiftasync %context) "frame-pointer"="all" {
 }
 
 ; CHECK64-LABEL: more_csrs:
-; CHECK64: btsq    $60, %rbp
 ; CHECK64: pushq   %rbp
 ; CHECK64: .seh_pushreg %rbp
-; CHECK64: pushq   %r14
-; CHECK64: .seh_pushreg %r14
-; CHECK64: leaq    8(%rsp), %rbp
-; CHECK64: subq    $8, %rsp
 ; CHECK64: pushq   %r15
 ; CHECK64: .seh_pushreg %r15
+; CHECK64: pushq   %rax
+; CHECK64: .seh_stackalloc 8
+; CHECK64: movq    %rsp, %rbp
+; CHECK64: .seh_setframe %rbp, 0
+; CHECK64: .seh_endprologue
+; CHECK64: movq    %r14, (%rbp)
 ; [...]
+; CHECK64: addq    $8, %rsp
 ; CHECK64: popq    %r15
-; CHECK64: addq    $16, %rsp
 ; CHECK64: popq    %rbp
-; CHECK64: btrq    $60, %rbp
 ; CHECK64: retq
 
 declare void @f(ptr)
@@ -51,21 +49,16 @@ define void @locals(ptr swiftasync %context) "frame-pointer"="all" {
 }
 
 ; CHECK64-LABEL: locals:
-; CHECK64: btsq    $60, %rbp
 ; CHECK64: pushq   %rbp
 ; CHECK64: .seh_pushreg %rbp
-; CHECK64: pushq   %r14
-; CHECK64: .seh_pushreg %r14
-; CHECK64: leaq    8(%rsp), %rbp
-; CHECK64: subq    $88, %rsp
+; CHECK64: subq    $80, %rsp
+; CHECK64: movq	%r14, -8(%rbp)
 
 ; CHECK64: leaq    -48(%rbp), %rcx
 ; CHECK64: callq   f
 
 ; CHECK64: addq    $80, %rsp
-; CHECK64: addq    $16, %rsp
 ; CHECK64: popq    %rbp
-; CHECK64: btrq    $60, %rbp
 ; CHECK64: retq
 
 define void @use_input_context(ptr swiftasync %context, ptr %ptr) "frame-pointer"="all" {
@@ -84,7 +77,7 @@ define ptr @context_in_func() "frmae-pointer"="non-leaf" {
 }
 
 ; CHECK64-LABEL: context_in_func:
-; CHECK64: leaq    -8(%rbp), %rax
+; CHECK64: movq   %rsp, %rax
 
 ; CHECK32-LABEL: context_in_func:
 ; CHECK32: movl    %esp, %eax
@@ -96,9 +89,7 @@ define void @write_frame_context(ptr swiftasync %context, ptr %new_context) "fra
 }
 
 ; CHECK64-LABEL: write_frame_context:
-; CHECK64: movq    %rbp, [[TMP:%.*]]
-; CHECK64: subq    $8, [[TMP]]
-; CHECK64: movq    %rcx, ([[TMP]])
+; CHECK64: movq    %rcx, (%rsp)
 
 define void @simple_fp_elim(ptr swiftasync %context) "frame-pointer"="non-leaf" {
   ret void
@@ -106,3 +97,25 @@ define void @simple_fp_elim(ptr swiftasync %context) "frame-pointer"="non-leaf"
 
 ; CHECK64-LABEL: simple_fp_elim:
 ; CHECK64-NOT: btsq
+
+define void @manylocals_and_overwritten_context(ptr swiftasync %context, ptr %new_context) "frame-pointer"="all" {
+  %ptr = call ptr @llvm.swift.async.context.addr()
+  store ptr %new_context, ptr %ptr
+  %var1 = alloca i64, i64 1
+  call void @f(ptr %var1)
+  %var2 = alloca i64, i64 16
+  call void @f(ptr %var2)
+  %ptr2 = call ptr @llvm.swift.async.context.addr()
+  store ptr %new_context, ptr %ptr2
+  ret void
+}
+
+; CHECK64-LABEL: manylocals_and_overwritten_context:
+; CHECK64:       pushq	%rbp
+; CHECK64:       subq	$184, %rsp
+; CHECK64:       leaq	128(%rsp), %rbp
+; CHECK64:       movq	%rcx, %rsi
+; CHECK64:       movq	%rcx, 48(%rbp)
+; CHECK64:       callq	f
+; CHECK64:       callq	f
+; CHECK64:       movq	%rsi, 48(%rbp)



More information about the llvm-commits mailing list