[llvm] e1e1836 - [CodeGen] Inline stack guard check on Windows (#136290)

via llvm-commits llvm-commits@lists.llvm.org
Thu Jun 12 07:38:46 PDT 2025


Author: Omair Javaid
Date: 2025-06-12T19:38:42+05:00
New Revision: e1e1836bbd70e4f30bd0be97b9d81eabfd6b45c8

URL: https://github.com/llvm/llvm-project/commit/e1e1836bbd70e4f30bd0be97b9d81eabfd6b45c8
DIFF: https://github.com/llvm/llvm-project/commit/e1e1836bbd70e4f30bd0be97b9d81eabfd6b45c8.diff

LOG: [CodeGen] Inline stack guard check on Windows (#136290)

This patch optimizes the Windows security cookie check mechanism by
moving the comparison inline and calling __security_check_cookie only
when the check fails. This avoids the overhead of a DLL call on every
function return.

Previously, we implemented this optimization through a machine pass
(X86WinFixupBufferSecurityCheckPass) in PR #95904, submitted by
@mahesh-attarde. We have reverted that pass in favor of this new
approach, and we have also abandoned the AArch64-specific
implementation of the same pass in PR #121938 in favor of this more
general solution.

The old machine instruction pass approach:
- Scanned the generated code to find __security_check_cookie calls
- Modified these calls by splitting basic blocks
- Added comparison logic and conditional branching
- Required complex block management and live register computation

The new approach:
- Implements the same optimization during instruction selection
- Directly emits the comparison and conditional branching
- No need for post-processing or basic block manipulation
- Disables the optimization at -Oz (minsize functions keep the function-based check).

Thanks @tamaspetz, @efriedma-quic and @arsenm for their help.
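
For illustration, the control flow produced by the inline check is roughly
the following. This is a minimal C++ sketch of the runtime behavior only,
not the emitted code: security_cookie and security_check_cookie_failed are
hypothetical stand-ins for the CRT's __security_cookie and
__security_check_cookie, which live in the Windows runtime and are not
reimplemented by this patch.

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Stand-in for the CRT's __security_cookie (initialized at process start).
static uintptr_t security_cookie = 0xBB40E64E;

// Stand-in for the failure path of __security_check_cookie; the real
// routine fast-fails the process.
[[noreturn]] static void security_check_cookie_failed(uintptr_t guard) {
  std::fprintf(stderr, "stack cookie mismatch: %#llx\n",
               static_cast<unsigned long long>(guard));
  std::abort();
}

void protected_function(const char *a) {
  // Prologue: copy the global cookie into a stack slot next to the buffer.
  volatile uintptr_t stack_guard = security_cookie;

  char buf[8];
  std::snprintf(buf, sizeof(buf), "%s", a);
  std::printf("buf == %s\n", buf);

  // Epilogue, new scheme: compare inline and call out only on a mismatch.
  if (stack_guard != security_cookie)
    security_check_cookie_failed(stack_guard);
  // Old scheme: unconditionally call __security_check_cookie here,
  // even when the guard is intact.
}

int main() {
  protected_function("hi");
  return 0;
}

In the common case the comparison falls through to the normal epilogue, so
the cross-DLL call is only taken when the guard has been corrupted.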

Added: 
    llvm/test/CodeGen/X86/stack-protector-msvc-oz.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
    llvm/lib/Target/X86/CMakeLists.txt
    llvm/lib/Target/X86/X86.h
    llvm/lib/Target/X86/X86TargetMachine.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll
    llvm/test/CodeGen/X86/opt-pipeline.ll
    llvm/test/CodeGen/X86/stack-protector-msvc.ll
    llvm/test/CodeGen/X86/tailcc-ssp.ll
    llvm/test/DebugInfo/COFF/fpo-stack-protect.ll

Removed: 
    llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index e6a1dc930685c..c63eb7fc6b374 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3037,8 +3037,9 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
 
   // First create the loads to the guard/stack slot for the comparison.
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
-  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
+  auto &DL = DAG.getDataLayout();
+  EVT PtrTy = TLI.getFrameIndexTy(DL);
+  EVT PtrMemTy = TLI.getPointerMemTy(DL, DL.getAllocaAddrSpace());
 
   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
   int FI = MFI.getStackProtectorIndex();
@@ -3047,8 +3048,8 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
   SDLoc dl = getCurSDLoc();
   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
   const Module &M = *ParentBB->getParent()->getFunction().getParent();
-  Align Align =
-      DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
+  Align Align = DL.getPrefTypeAlign(
+      PointerType::get(M.getContext(), DL.getAllocaAddrSpace()));
 
   // Generate code to load the content of the guard slot.
   SDValue GuardVal = DAG.getLoad(
@@ -3059,8 +3060,14 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
   if (TLI.useStackGuardXorFP())
     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
 
-  // Retrieve guard check function, nullptr if instrumentation is inlined.
-  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
+  // If we're using function-based instrumentation, call the guard check
+  // function
+  if (SPD.shouldEmitFunctionBasedCheckStackProtector()) {
+    // Get the guard check function from the target and verify it exists since
+    // we're using function-based instrumentation
+    const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M);
+    assert(GuardCheckFn && "Guard check function is null");
+
     // The target provides a guard check function to validate the guard value.
     // Generate a call to that function with the content of the guard slot as
     // argument.
@@ -3101,10 +3108,9 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
   }
 
   // Perform the comparison via a getsetcc.
-  SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
-                                                        *DAG.getContext(),
-                                                        Guard.getValueType()),
-                             Guard, GuardVal, ISD::SETNE);
+  SDValue Cmp = DAG.getSetCC(
+      dl, TLI.getSetCCResultType(DL, *DAG.getContext(), Guard.getValueType()),
+      Guard, GuardVal, ISD::SETNE);
 
   // If the guard/stackslot do not equal, branch to failure MBB.
   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
@@ -3126,14 +3132,69 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
 /// For a high level explanation of how this fits into the stack protector
 /// generation see the comment on the declaration of class
 /// StackProtectorDescriptor.
-void
-SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
+void SelectionDAGBuilder::visitSPDescriptorFailure(
+    StackProtectorDescriptor &SPD) {
+
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  TargetLowering::MakeLibCallOptions CallOptions;
-  CallOptions.setDiscardResult(true);
-  SDValue Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL,
-                                  MVT::isVoid, {}, CallOptions, getCurSDLoc())
-                      .second;
+  MachineBasicBlock *ParentBB = SPD.getParentMBB();
+  const Module &M = *ParentBB->getParent()->getFunction().getParent();
+  SDValue Chain;
+
+  // For -Oz builds with a guard check function, we use function-based
+  // instrumentation. Otherwise, if we have a guard check function, we call it
+  // in the failure block.
+  auto *GuardCheckFn = TLI.getSSPStackGuardCheck(M);
+  if (GuardCheckFn && !SPD.shouldEmitFunctionBasedCheckStackProtector()) {
+    // First create the loads to the guard/stack slot for the comparison.
+    auto &DL = DAG.getDataLayout();
+    EVT PtrTy = TLI.getFrameIndexTy(DL);
+    EVT PtrMemTy = TLI.getPointerMemTy(DL, DL.getAllocaAddrSpace());
+
+    MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
+    int FI = MFI.getStackProtectorIndex();
+
+    SDLoc dl = getCurSDLoc();
+    SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
+    Align Align = DL.getPrefTypeAlign(
+        PointerType::get(M.getContext(), DL.getAllocaAddrSpace()));
+
+    // Generate code to load the content of the guard slot.
+    SDValue GuardVal = DAG.getLoad(
+        PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
+        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
+        MachineMemOperand::MOVolatile);
+
+    if (TLI.useStackGuardXorFP())
+      GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
+
+    // The target provides a guard check function to validate the guard value.
+    // Generate a call to that function with the content of the guard slot as
+    // argument.
+    FunctionType *FnTy = GuardCheckFn->getFunctionType();
+    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
+
+    TargetLowering::ArgListTy Args;
+    TargetLowering::ArgListEntry Entry;
+    Entry.Node = GuardVal;
+    Entry.Ty = FnTy->getParamType(0);
+    if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
+      Entry.IsInReg = true;
+    Args.push_back(Entry);
+
+    TargetLowering::CallLoweringInfo CLI(DAG);
+    CLI.setDebugLoc(getCurSDLoc())
+        .setChain(DAG.getEntryNode())
+        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
+                   getValue(GuardCheckFn), std::move(Args));
+
+    Chain = TLI.LowerCallTo(CLI).second;
+  } else {
+    TargetLowering::MakeLibCallOptions CallOptions;
+    CallOptions.setDiscardResult(true);
+    Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
+                            {}, CallOptions, getCurSDLoc())
+                .second;
+  }
 
   // Emit a trap instruction if we are required to do so.
   const TargetOptions &TargetOpts = DAG.getTarget().Options;

diff  --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index f599637564715..b02a03c0b0cb2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -1877,7 +1877,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
 
     if (SP->shouldEmitSDCheck(*LLVMBB)) {
       bool FunctionBasedInstrumentation =
-          TLI->getSSPStackGuardCheck(*Fn.getParent());
+          TLI->getSSPStackGuardCheck(*Fn.getParent()) && Fn.hasMinSize();
       SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->getMBB(LLVMBB),
                                    FunctionBasedInstrumentation);
     }
@@ -1950,8 +1950,7 @@ SelectionDAGISel::FinishBasicBlock() {
 
     // Add load and check to the basicblock.
     FuncInfo->MBB = ParentMBB;
-    FuncInfo->InsertPt =
-        findSplitPointForStackProtector(ParentMBB, *TII);
+    FuncInfo->InsertPt = findSplitPointForStackProtector(ParentMBB, *TII);
     SDB->visitSPDescriptorParent(SDB->SPDescriptor, ParentMBB);
     CurDAG->setRoot(SDB->getRoot());
     SDB->clear();
@@ -1973,8 +1972,7 @@ SelectionDAGISel::FinishBasicBlock() {
         findSplitPointForStackProtector(ParentMBB, *TII);
 
     // Splice the terminator of ParentMBB into SuccessMBB.
-    SuccessMBB->splice(SuccessMBB->end(), ParentMBB,
-                       SplitPoint,
+    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
                        ParentMBB->end());
 
     // Add compare/jump on neq/jump to the parent BB.

diff  --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 6627e97dd0943..1bf9f8b467993 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -85,7 +85,6 @@ set(sources
   X86VZeroUpper.cpp
   X86WinEHState.cpp
   X86WinEHUnwindV2.cpp
-  X86WinFixupBufferSecurityCheck.cpp
   X86InsertWait.cpp
   GISel/X86CallLowering.cpp
   GISel/X86InstructionSelector.cpp

diff  --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index be2ddac35cab8..6261fadf10a7a 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -73,9 +73,6 @@ FunctionPass *createX86OptimizeLEAs();
 /// Return a pass that transforms setcc + movzx pairs into xor + setcc.
 FunctionPass *createX86FixupSetCC();
 
-/// Return a pass that transform inline buffer security check into seperate bb
-FunctionPass *createX86WinFixupBufferSecurityCheckPass();
-
 /// Return a pass that avoids creating store forward block issues in the hardware.
 FunctionPass *createX86AvoidStoreForwardingBlocks();
 
@@ -195,7 +192,6 @@ void initializeX86ExpandPseudoPass(PassRegistry &);
 void initializeX86FastPreTileConfigPass(PassRegistry &);
 void initializeX86FastTileConfigPass(PassRegistry &);
 void initializeX86FixupSetCCPassPass(PassRegistry &);
-void initializeX86WinFixupBufferSecurityCheckPassPass(PassRegistry &);
 void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
 void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
 void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);

diff  --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 7e75c0e565863..2d4afc23f1a42 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -554,7 +554,6 @@ bool X86PassConfig::addPreISel() {
 void X86PassConfig::addPreRegAlloc() {
   if (getOptLevel() != CodeGenOptLevel::None) {
     addPass(&LiveRangeShrinkID);
-    addPass(createX86WinFixupBufferSecurityCheckPass());
     addPass(createX86FixupSetCC());
     addPass(createX86OptimizeLEAs());
     addPass(createX86CallFrameOptimization());

diff  --git a/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp b/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
deleted file mode 100644
index 5c12af1fee637..0000000000000
--- a/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
+++ /dev/null
@@ -1,245 +0,0 @@
-//===- X86WinFixupBufferSecurityCheck.cpp Fix Buffer Security Check Call -===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// Buffer Security Check implementation inserts windows specific callback into
-// code. On windows, __security_check_cookie call gets call everytime function
-// is return without fixup. Since this function is defined in runtime library,
-// it incures cost of call in dll which simply does comparison and returns most
-// time. With Fixup, We selective move to call in DLL only if comparison fails.
-//===----------------------------------------------------------------------===//
-
-#include "X86.h"
-#include "X86FrameLowering.h"
-#include "X86InstrInfo.h"
-#include "X86Subtarget.h"
-#include "llvm/CodeGen/LivePhysRegs.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/IR/Module.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "x86-win-fixup-bscheck"
-
-namespace {
-
-class X86WinFixupBufferSecurityCheckPass : public MachineFunctionPass {
-public:
-  static char ID;
-
-  X86WinFixupBufferSecurityCheckPass() : MachineFunctionPass(ID) {}
-
-  StringRef getPassName() const override {
-    return "X86 Windows Fixup Buffer Security Check";
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  std::pair<MachineBasicBlock *, MachineInstr *>
-  getSecurityCheckerBasicBlock(MachineFunction &MF);
-
-  void getGuardCheckSequence(MachineBasicBlock *CurMBB, MachineInstr *CheckCall,
-                             MachineInstr *SeqMI[5]);
-
-  void SplitBasicBlock(MachineBasicBlock *CurMBB, MachineBasicBlock *NewRetMBB,
-                       MachineBasicBlock::iterator SplitIt);
-
-  void FinishBlock(MachineBasicBlock *MBB);
-
-  void FinishFunction(MachineBasicBlock *FailMBB, MachineBasicBlock *NewRetMBB);
-
-  std::pair<MachineInstr *, MachineInstr *>
-  CreateFailCheckSequence(MachineBasicBlock *CurMBB, MachineBasicBlock *FailMBB,
-                          MachineInstr *SeqMI[5]);
-};
-} // end anonymous namespace
-
-char X86WinFixupBufferSecurityCheckPass::ID = 0;
-
-INITIALIZE_PASS(X86WinFixupBufferSecurityCheckPass, DEBUG_TYPE, DEBUG_TYPE,
-                false, false)
-
-FunctionPass *llvm::createX86WinFixupBufferSecurityCheckPass() {
-  return new X86WinFixupBufferSecurityCheckPass();
-}
-
-void X86WinFixupBufferSecurityCheckPass::SplitBasicBlock(
-    MachineBasicBlock *CurMBB, MachineBasicBlock *NewRetMBB,
-    MachineBasicBlock::iterator SplitIt) {
-  NewRetMBB->splice(NewRetMBB->end(), CurMBB, SplitIt, CurMBB->end());
-}
-
-std::pair<MachineBasicBlock *, MachineInstr *>
-X86WinFixupBufferSecurityCheckPass::getSecurityCheckerBasicBlock(
-    MachineFunction &MF) {
-  MachineBasicBlock::reverse_iterator RBegin, REnd;
-
-  for (auto &MBB : llvm::reverse(MF)) {
-    for (RBegin = MBB.rbegin(), REnd = MBB.rend(); RBegin != REnd; ++RBegin) {
-      auto &MI = *RBegin;
-      if (MI.getOpcode() == X86::CALL64pcrel32 &&
-          MI.getNumExplicitOperands() == 1) {
-        auto MO = MI.getOperand(0);
-        if (MO.isGlobal()) {
-          auto Callee = dyn_cast<Function>(MO.getGlobal());
-          if (Callee && Callee->getName() == "__security_check_cookie") {
-            return std::make_pair(&MBB, &MI);
-            break;
-          }
-        }
-      }
-    }
-  }
-  return std::make_pair(nullptr, nullptr);
-}
-
-void X86WinFixupBufferSecurityCheckPass::getGuardCheckSequence(
-    MachineBasicBlock *CurMBB, MachineInstr *CheckCall,
-    MachineInstr *SeqMI[5]) {
-
-  MachineBasicBlock::iterator UIt(CheckCall);
-  MachineBasicBlock::reverse_iterator DIt(CheckCall);
-  // Seq From StackUp to Stack Down Is fixed.
-  // ADJCALLSTACKUP64
-  ++UIt;
-  SeqMI[4] = &*UIt;
-
-  // CALL __security_check_cookie
-  SeqMI[3] = CheckCall;
-
-  // COPY function slot cookie
-  ++DIt;
-  SeqMI[2] = &*DIt;
-
-  // ADJCALLSTACKDOWN64
-  ++DIt;
-  SeqMI[1] = &*DIt;
-
-  MachineBasicBlock::reverse_iterator XIt(SeqMI[1]);
-  for (; XIt != CurMBB->rbegin(); ++XIt) {
-    auto &CI = *XIt;
-    if ((CI.getOpcode() == X86::XOR64_FP) || (CI.getOpcode() == X86::XOR32_FP))
-      break;
-  }
-  SeqMI[0] = &*XIt;
-}
-
-std::pair<MachineInstr *, MachineInstr *>
-X86WinFixupBufferSecurityCheckPass::CreateFailCheckSequence(
-    MachineBasicBlock *CurMBB, MachineBasicBlock *FailMBB,
-    MachineInstr *SeqMI[5]) {
-
-  auto MF = CurMBB->getParent();
-
-  Module &M = *MF->getFunction().getParent();
-  GlobalVariable *GV = M.getGlobalVariable("__security_cookie");
-  assert(GV && " Security Cookie was not installed!");
-
-  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
-
-  MachineInstr *GuardXor = SeqMI[0];
-  MachineBasicBlock::iterator InsertPt(GuardXor);
-  ++InsertPt;
-
-  // Compare security_Cookie with XOR_Val, if not same, we have violation
-  auto CMI = BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rm))
-                 .addReg(GuardXor->getOperand(0).getReg())
-                 .addReg(X86::RIP)
-                 .addImm(1)
-                 .addReg(X86::NoRegister)
-                 .addGlobalAddress(GV)
-                 .addReg(X86::NoRegister);
-
-  BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::JCC_1))
-      .addMBB(FailMBB)
-      .addImm(X86::COND_NE);
-
-  auto JMI = BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::JMP_1));
-
-  return std::make_pair(CMI.getInstr(), JMI.getInstr());
-}
-
-void X86WinFixupBufferSecurityCheckPass::FinishBlock(MachineBasicBlock *MBB) {
-  LivePhysRegs LiveRegs;
-  computeAndAddLiveIns(LiveRegs, *MBB);
-}
-
-void X86WinFixupBufferSecurityCheckPass::FinishFunction(
-    MachineBasicBlock *FailMBB, MachineBasicBlock *NewRetMBB) {
-  FailMBB->getParent()->RenumberBlocks();
-  // FailMBB includes call to MSCV RT  where is __security_check_cookie
-  // function is called. This function uses regcall and it expects cookie
-  // value from stack slot.( even if this is modified)
-  // Before going further we compute back livein for this block to make sure
-  // it is live and provided.
-  FinishBlock(FailMBB);
-  FinishBlock(NewRetMBB);
-}
-
-bool X86WinFixupBufferSecurityCheckPass::runOnMachineFunction(
-    MachineFunction &MF) {
-  bool Changed = false;
-  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
-
-  if (!(STI.isTargetWindowsItanium() || STI.isTargetWindowsMSVC()))
-    return Changed;
-
-  // Check if security cookie was installed or not
-  Module &M = *MF.getFunction().getParent();
-  GlobalVariable *GV = M.getGlobalVariable("__security_cookie");
-  if (!GV)
-    return Changed;
-
-  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
-
-  // Check if security check cookie was installed or not
-  auto [CurMBB, CheckCall] = getSecurityCheckerBasicBlock(MF);
-
-  if (!CheckCall)
-    return Changed;
-
-  MachineBasicBlock *FailMBB = MF.CreateMachineBasicBlock();
-  MachineBasicBlock *NewRetMBB = MF.CreateMachineBasicBlock();
-
-  MF.insert(MF.end(), NewRetMBB);
-  MF.insert(MF.end(), FailMBB);
-
-  MachineInstr *SeqMI[5];
-  getGuardCheckSequence(CurMBB, CheckCall, SeqMI);
-  // MachineInstr * GuardXor  = SeqMI[0];
-
-  auto FailSeqRange = CreateFailCheckSequence(CurMBB, FailMBB, SeqMI);
-  MachineInstrBuilder JMI(MF, FailSeqRange.second);
-
-  // After Inserting JMP_1, we can not have two terminators
-  // in same block, split CurrentMBB after JMP_1
-  MachineBasicBlock::iterator SplitIt(SeqMI[4]);
-  ++SplitIt;
-  SplitBasicBlock(CurMBB, NewRetMBB, SplitIt);
-
-  // Fill up Failure Routine, move Fail Check Squence from CurMBB to FailMBB
-  MachineBasicBlock::iterator U1It(SeqMI[1]);
-  MachineBasicBlock::iterator U2It(SeqMI[4]);
-  ++U2It;
-  FailMBB->splice(FailMBB->end(), CurMBB, U1It, U2It);
-  BuildMI(*FailMBB, FailMBB->end(), DebugLoc(), TII->get(X86::INT3));
-
-  // Move left over instruction after StackUp
-  // from Current Basic BLocks into New Return Block
-  JMI.addMBB(NewRetMBB);
-  MachineBasicBlock::iterator SplicePt(JMI.getInstr());
-  ++SplicePt;
-  if (SplicePt != CurMBB->end())
-    NewRetMBB->splice(NewRetMBB->end(), CurMBB, SplicePt);
-
-  // Restructure Basic Blocks
-  CurMBB->addSuccessor(NewRetMBB);
-  CurMBB->addSuccessor(FailMBB);
-
-  FinishFunction(FailMBB, NewRetMBB);
-  return !Changed;
-}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll
index 6aefc5341da07..e7f4785d01df6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-stack-protector-windows.ll
@@ -17,8 +17,12 @@ define void @caller() sspreq {
 ; CHECK-NEXT:    ldr x8, [x8, :lo12:__security_cookie]
 ; CHECK-NEXT:    str x8, [sp, #8]
 ; CHECK-NEXT:    bl callee
-; CHECK-NEXT:    ldr x0, [sp, #8]
-; CHECK-NEXT:    bl __security_check_cookie
+; CHECK-NEXT:    adrp x8, __security_cookie
+; CHECK-NEXT:    ldr x9, [sp, #8]
+; CHECK-NEXT:    ldr x8, [x8, :lo12:__security_cookie]
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    b.ne .LBB0_2
+; CHECK-NEXT:  // %bb.1: // %entry
 ; CHECK-NEXT:    .seh_startepilogue
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    .seh_save_reg x30, 16
@@ -26,6 +30,10 @@ define void @caller() sspreq {
 ; CHECK-NEXT:    .seh_stackalloc 32
 ; CHECK-NEXT:    .seh_endepilogue
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_2: // %entry
+; CHECK-NEXT:    ldr x0, [sp, #8]
+; CHECK-NEXT:    bl __security_check_cookie
+; CHECK-NEXT:    brk #0x1
 ; CHECK-NEXT:    .seh_endfunclet
 ; CHECK-NEXT:    .seh_endproc
 entry:

diff  --git a/llvm/test/CodeGen/X86/opt-pipeline.ll b/llvm/test/CodeGen/X86/opt-pipeline.ll
index 540046e6a8638..8d155bd57df13 100644
--- a/llvm/test/CodeGen/X86/opt-pipeline.ll
+++ b/llvm/test/CodeGen/X86/opt-pipeline.ll
@@ -121,7 +121,6 @@
 ; CHECK-NEXT:       Peephole Optimizations
 ; CHECK-NEXT:       Remove dead machine instructions
 ; CHECK-NEXT:       Live Range Shrink
-; CHECK-NEXT:       X86 Windows Fixup Buffer Security Check
 ; CHECK-NEXT:       X86 Fixup SetCC
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       X86 LEA Optimize

diff  --git a/llvm/test/CodeGen/X86/stack-protector-msvc-oz.ll b/llvm/test/CodeGen/X86/stack-protector-msvc-oz.ll
new file mode 100644
index 0000000000000..d8a772efbd7ed
--- /dev/null
+++ b/llvm/test/CodeGen/X86/stack-protector-msvc-oz.ll
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=i386-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-X86 %s
+; RUN: llc -mtriple=x86_64-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-X64 %s
+
+; Make sure fastisel falls back and does something secure.
+; RUN: llc -mtriple=i686-pc-windows-msvc -O0 < %s -o - | FileCheck -check-prefix=MSVC-X86-O0 %s
+; RUN: llc -mtriple=x86_64-pc-windows-msvc -O0 < %s -o - | FileCheck -check-prefix=MSVC-X64-O0 %s
+
+@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00"    ; <ptr> [#uses=1]
+
+define void @test(ptr %a) nounwind ssp minsize {
+; MSVC-X86-LABEL: test:
+; MSVC-X86:       # %bb.0: # %entry
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    subl $12, %esp
+; MSVC-X86-NEXT:    movl ___security_cookie, %eax
+; MSVC-X86-NEXT:    xorl %esp, %eax
+; MSVC-X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; MSVC-X86-NEXT:    movl %esp, %esi
+; MSVC-X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    calll _strcpy
+; MSVC-X86-NEXT:    popl %ecx
+; MSVC-X86-NEXT:    popl %edx
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    pushl $LC
+; MSVC-X86-NEXT:    calll _printf
+; MSVC-X86-NEXT:    popl %ecx
+; MSVC-X86-NEXT:    popl %edx
+; MSVC-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-NEXT:    xorl %esp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-NEXT:    addl $12, %esp
+; MSVC-X86-NEXT:    popl %esi
+; MSVC-X86-NEXT:    retl
+;
+; MSVC-X64-LABEL: test:
+; MSVC-X64:       # %bb.0: # %entry
+; MSVC-X64-NEXT:    pushq %rsi
+; MSVC-X64-NEXT:    subq $64, %rsp
+; MSVC-X64-NEXT:    movq %rcx, %rdx
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-NEXT:    xorq %rsp, %rax
+; MSVC-X64-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; MSVC-X64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; MSVC-X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rsi
+; MSVC-X64-NEXT:    movq %rsi, %rcx
+; MSVC-X64-NEXT:    callq strcpy
+; MSVC-X64-NEXT:    leaq LC(%rip), %rcx
+; MSVC-X64-NEXT:    movq %rsi, %rdx
+; MSVC-X64-NEXT:    callq printf
+; MSVC-X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-NEXT:    xorq %rsp, %rcx
+; MSVC-X64-NEXT:    callq __security_check_cookie
+; MSVC-X64-NEXT:    addq $64, %rsp
+; MSVC-X64-NEXT:    popq %rsi
+; MSVC-X64-NEXT:    retq
+;
+; MSVC-X86-O0-LABEL: test:
+; MSVC-X86-O0:       # %bb.0: # %entry
+; MSVC-X86-O0-NEXT:    subl $20, %esp
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %eax
+; MSVC-X86-O0-NEXT:    xorl %esp, %eax
+; MSVC-X86-O0-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    movl %esp, %eax
+; MSVC-X86-O0-NEXT:    movl %ecx, 4(%eax)
+; MSVC-X86-O0-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    movl %ecx, (%eax)
+; MSVC-X86-O0-NEXT:    calll _strcpy
+; MSVC-X86-O0-NEXT:    leal LC, %ecx
+; MSVC-X86-O0-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; MSVC-X86-O0-NEXT:    movl %ecx, (%esp)
+; MSVC-X86-O0-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; MSVC-X86-O0-NEXT:    calll _printf
+; MSVC-X86-O0-NEXT:  # %bb.1: # %return
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    xorl %esp, %ecx
+; MSVC-X86-O0-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-O0-NEXT:    addl $20, %esp
+; MSVC-X86-O0-NEXT:    retl
+;
+; MSVC-X64-O0-LABEL: test:
+; MSVC-X64-O0:       # %bb.0: # %entry
+; MSVC-X64-O0-NEXT:    subq $56, %rsp
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    xorq %rsp, %rax
+; MSVC-X64-O0-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; MSVC-X64-O0-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; MSVC-X64-O0-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-O0-NEXT:    callq strcpy
+; MSVC-X64-O0-NEXT:    leaq LC(%rip), %rcx
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; MSVC-X64-O0-NEXT:    callq printf
+; MSVC-X64-O0-NEXT:  # %bb.1: # %return
+; MSVC-X64-O0-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rsp, %rcx
+; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:    addq $56, %rsp
+; MSVC-X64-O0-NEXT:    retq
+entry:
+ %a_addr = alloca ptr    ; <ptr> [#uses=2]
+ %buf = alloca [8 x i8]    ; <ptr> [#uses=2]
+ store ptr %a, ptr %a_addr
+ %0 = load ptr, ptr %a_addr, align 4    ; <ptr> [#uses=1]
+ %1 = call ptr @strcpy(ptr %buf, ptr %0) nounwind   ; <ptr> [#uses=0]
+ %2 = call i32 (ptr, ...) @printf(ptr @"\01LC", ptr %buf) nounwind    ; <i32> [#uses=0]
+ br label %return
+
+return:    ; preds = %entry
+ ret void
+}
+
+declare ptr @strcpy(ptr, ptr) nounwind
+
+declare i32 @printf(ptr, ...) nounwind
+

diff  --git a/llvm/test/CodeGen/X86/stack-protector-msvc.ll b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
index d718062d2c485..a868fa549296d 100644
--- a/llvm/test/CodeGen/X86/stack-protector-msvc.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
@@ -25,12 +25,19 @@ define void @test(ptr %a) nounwind ssp {
 ; MSVC-X86-NEXT:    pushl $LC
 ; MSVC-X86-NEXT:    calll _printf
 ; MSVC-X86-NEXT:    addl $8, %esp
-; MSVC-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; MSVC-X86-NEXT:    xorl %esp, %ecx
-; MSVC-X86-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; MSVC-X86-NEXT:    xorl %esp, %eax
+; MSVC-X86-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-NEXT:    cmpl %eax, %ecx
+; MSVC-X86-NEXT:    jne LBB0_2
+; MSVC-X86-NEXT:  # %bb.1: # %return
 ; MSVC-X86-NEXT:    addl $12, %esp
 ; MSVC-X86-NEXT:    popl %esi
 ; MSVC-X86-NEXT:    retl
+; MSVC-X86-NEXT:  LBB0_2: # %return
+; MSVC-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-NEXT:    xorl %esp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie@4
 ;
 ; MSVC-X64-LABEL: test:
 ; MSVC-X64:       # %bb.0: # %entry
@@ -47,17 +54,19 @@ define void @test(ptr %a) nounwind ssp {
 ; MSVC-X64-NEXT:    leaq LC(%rip), %rcx
 ; MSVC-X64-NEXT:    movq %rsi, %rdx
 ; MSVC-X64-NEXT:    callq printf
-; MSVC-X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
-; MSVC-X64-NEXT:    xorq %rsp, %rcx
-; MSVC-X64-NEXT:    cmpq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; MSVC-X64-NEXT:    xorq %rsp, %rax
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    cmpq %rax, %rcx
 ; MSVC-X64-NEXT:    jne .LBB0_2
-; MSVC-X64-NEXT:  # %bb.1:
+; MSVC-X64-NEXT:  # %bb.1: # %return
 ; MSVC-X64-NEXT:    addq $64, %rsp
 ; MSVC-X64-NEXT:    popq %rsi
 ; MSVC-X64-NEXT:    retq
-; MSVC-X64-NEXT:  .LBB0_2:
+; MSVC-X64-NEXT:  .LBB0_2: # %return
+; MSVC-X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-NEXT:    xorq %rsp, %rcx
 ; MSVC-X64-NEXT:    callq __security_check_cookie
-; MSVC-X64-NEXT:    int3
 ;
 ; MSVC-X86-O0-LABEL: test:
 ; MSVC-X86-O0:       # %bb.0: # %entry
@@ -80,7 +89,15 @@ define void @test(ptr %a) nounwind ssp {
 ; MSVC-X86-O0-NEXT:  # %bb.1: # %return
 ; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; MSVC-X86-O0-NEXT:    xorl %esp, %ecx
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %eax
+; MSVC-X86-O0-NEXT:    subl %ecx, %eax
+; MSVC-X86-O0-NEXT:    jne LBB0_3
+; MSVC-X86-O0-NEXT:    jmp LBB0_2
+; MSVC-X86-O0-NEXT:  LBB0_3: # %return
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    xorl %esp, %ecx
 ; MSVC-X86-O0-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-O0-NEXT:  LBB0_2: # %return
 ; MSVC-X86-O0-NEXT:    addl $20, %esp
 ; MSVC-X86-O0-NEXT:    retl
 ;
@@ -100,9 +117,18 @@ define void @test(ptr %a) nounwind ssp {
 ; MSVC-X64-O0-NEXT:  # %bb.1: # %return
 ; MSVC-X64-O0-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
 ; MSVC-X64-O0-NEXT:    xorq %rsp, %rcx
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    subq %rcx, %rax
+; MSVC-X64-O0-NEXT:    jne .LBB0_3
+; MSVC-X64-O0-NEXT:    jmp .LBB0_2
+; MSVC-X64-O0-NEXT:  .LBB0_3: # %return
+; MSVC-X64-O0-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rsp, %rcx
 ; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:  .LBB0_2: # %return
 ; MSVC-X64-O0-NEXT:    addq $56, %rsp
 ; MSVC-X64-O0-NEXT:    retq
+
 entry:
  %a_addr = alloca ptr    ; <ptr> [#uses=2]
  %buf = alloca [8 x i8]    ; <ptr> [#uses=2]
@@ -134,12 +160,19 @@ define void @test_vla(i32 %n) nounwind ssp {
 ; MSVC-X86-NEXT:    pushl %eax
 ; MSVC-X86-NEXT:    calll _escape
 ; MSVC-X86-NEXT:    addl $4, %esp
-; MSVC-X86-NEXT:    movl -4(%ebp), %ecx
-; MSVC-X86-NEXT:    xorl %ebp, %ecx
-; MSVC-X86-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-NEXT:    movl -4(%ebp), %eax
+; MSVC-X86-NEXT:    xorl %ebp, %eax
+; MSVC-X86-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-NEXT:    cmpl %eax, %ecx
+; MSVC-X86-NEXT:    jne LBB1_2
+; MSVC-X86-NEXT:  # %bb.1:
 ; MSVC-X86-NEXT:    movl %ebp, %esp
 ; MSVC-X86-NEXT:    popl %ebp
 ; MSVC-X86-NEXT:    retl
+; MSVC-X86-NEXT:  LBB1_2:
+; MSVC-X86-NEXT:    movl -4(%ebp), %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie@4
 ;
 ; MSVC-X64-LABEL: test_vla:
 ; MSVC-X64:       # %bb.0:
@@ -158,19 +191,20 @@ define void @test_vla(i32 %n) nounwind ssp {
 ; MSVC-X64-NEXT:    subq $32, %rsp
 ; MSVC-X64-NEXT:    callq escape
 ; MSVC-X64-NEXT:    addq $32, %rsp
-; MSVC-X64-NEXT:    movq -8(%rbp), %rcx
-; MSVC-X64-NEXT:    xorq %rbp, %rcx
-; MSVC-X64-NEXT:    cmpq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    movq -8(%rbp), %rax
+; MSVC-X64-NEXT:    xorq %rbp, %rax
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    cmpq %rax, %rcx
 ; MSVC-X64-NEXT:    jne .LBB1_2
 ; MSVC-X64-NEXT:  # %bb.1:
 ; MSVC-X64-NEXT:    movq %rbp, %rsp
 ; MSVC-X64-NEXT:    popq %rbp
 ; MSVC-X64-NEXT:    retq
 ; MSVC-X64-NEXT:  .LBB1_2:
+; MSVC-X64-NEXT:    movq -8(%rbp), %rcx
+; MSVC-X64-NEXT:    xorq %rbp, %rcx
 ; MSVC-X64-NEXT:    subq $32, %rsp
 ; MSVC-X64-NEXT:    callq __security_check_cookie
-; MSVC-X64-NEXT:    addq $32, %rsp
-; MSVC-X64-NEXT:    int3
 ;
 ; MSVC-X86-O0-LABEL: test_vla:
 ; MSVC-X86-O0:       # %bb.0:
@@ -190,7 +224,15 @@ define void @test_vla(i32 %n) nounwind ssp {
 ; MSVC-X86-O0-NEXT:    addl $4, %esp
 ; MSVC-X86-O0-NEXT:    movl -4(%ebp), %ecx
 ; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %eax
+; MSVC-X86-O0-NEXT:    subl %ecx, %eax
+; MSVC-X86-O0-NEXT:    jne LBB1_2
+; MSVC-X86-O0-NEXT:    jmp LBB1_1
+; MSVC-X86-O0-NEXT:  LBB1_2:
+; MSVC-X86-O0-NEXT:    movl -4(%ebp), %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
 ; MSVC-X86-O0-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-O0-NEXT:  LBB1_1:
 ; MSVC-X86-O0-NEXT:    movl %ebp, %esp
 ; MSVC-X86-O0-NEXT:    popl %ebp
 ; MSVC-X86-O0-NEXT:    retl
@@ -215,8 +257,16 @@ define void @test_vla(i32 %n) nounwind ssp {
 ; MSVC-X64-O0-NEXT:    addq $32, %rsp
 ; MSVC-X64-O0-NEXT:    movq -8(%rbp), %rcx
 ; MSVC-X64-O0-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    subq %rcx, %rax
+; MSVC-X64-O0-NEXT:    jne .LBB1_2
+; MSVC-X64-O0-NEXT:    jmp .LBB1_1
+; MSVC-X64-O0-NEXT:  .LBB1_2:
+; MSVC-X64-O0-NEXT:    movq -8(%rbp), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rcx
 ; MSVC-X64-O0-NEXT:    subq $32, %rsp
 ; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:  .LBB1_1:
 ; MSVC-X64-O0-NEXT:    movq %rbp, %rsp
 ; MSVC-X64-O0-NEXT:    popq %rbp
 ; MSVC-X64-O0-NEXT:    retq
@@ -253,14 +303,21 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
 ; MSVC-X86-NEXT:    pushl %edi
 ; MSVC-X86-NEXT:    calll _escape
 ; MSVC-X86-NEXT:    addl $4, %esp
-; MSVC-X86-NEXT:    movl 12(%esi), %ecx
-; MSVC-X86-NEXT:    xorl %ebp, %ecx
-; MSVC-X86-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-NEXT:    movl 12(%esi), %eax
+; MSVC-X86-NEXT:    xorl %ebp, %eax
+; MSVC-X86-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-NEXT:    cmpl %eax, %ecx
+; MSVC-X86-NEXT:    jne LBB2_2
+; MSVC-X86-NEXT:  # %bb.1:
 ; MSVC-X86-NEXT:    leal -8(%ebp), %esp
 ; MSVC-X86-NEXT:    popl %esi
 ; MSVC-X86-NEXT:    popl %edi
 ; MSVC-X86-NEXT:    popl %ebp
 ; MSVC-X86-NEXT:    retl
+; MSVC-X86-NEXT:  LBB2_2:
+; MSVC-X86-NEXT:    movl 12(%esi), %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie@4
 ;
 ; MSVC-X64-LABEL: test_vla_realign:
 ; MSVC-X64:       # %bb.0:
@@ -286,9 +343,10 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
 ; MSVC-X64-NEXT:    movq %rsi, %rcx
 ; MSVC-X64-NEXT:    callq escape
 ; MSVC-X64-NEXT:    addq $32, %rsp
-; MSVC-X64-NEXT:    movq 24(%rbx), %rcx
-; MSVC-X64-NEXT:    xorq %rbp, %rcx
-; MSVC-X64-NEXT:    cmpq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    movq 24(%rbx), %rax
+; MSVC-X64-NEXT:    xorq %rbp, %rax
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    cmpq %rax, %rcx
 ; MSVC-X64-NEXT:    jne .LBB2_2
 ; MSVC-X64-NEXT:  # %bb.1:
 ; MSVC-X64-NEXT:    movq %rbp, %rsp
@@ -297,10 +355,10 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
 ; MSVC-X64-NEXT:    popq %rbp
 ; MSVC-X64-NEXT:    retq
 ; MSVC-X64-NEXT:  .LBB2_2:
+; MSVC-X64-NEXT:    movq 24(%rbx), %rcx
+; MSVC-X64-NEXT:    xorq %rbp, %rcx
 ; MSVC-X64-NEXT:    subq $32, %rsp
 ; MSVC-X64-NEXT:    callq __security_check_cookie
-; MSVC-X64-NEXT:    addq $32, %rsp
-; MSVC-X64-NEXT:    int3
 ;
 ; MSVC-X86-O0-LABEL: test_vla_realign:
 ; MSVC-X86-O0:       # %bb.0:
@@ -328,7 +386,15 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
 ; MSVC-X86-O0-NEXT:    addl $4, %esp
 ; MSVC-X86-O0-NEXT:    movl 48(%esi), %ecx
 ; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %eax
+; MSVC-X86-O0-NEXT:    subl %ecx, %eax
+; MSVC-X86-O0-NEXT:    jne LBB2_2
+; MSVC-X86-O0-NEXT:    jmp LBB2_1
+; MSVC-X86-O0-NEXT:  LBB2_2:
+; MSVC-X86-O0-NEXT:    movl 48(%esi), %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
 ; MSVC-X86-O0-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-O0-NEXT:  LBB2_1:
 ; MSVC-X86-O0-NEXT:    leal -4(%ebp), %esp
 ; MSVC-X86-O0-NEXT:    popl %esi
 ; MSVC-X86-O0-NEXT:    popl %ebp
@@ -361,8 +427,16 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
 ; MSVC-X64-O0-NEXT:    addq $32, %rsp
 ; MSVC-X64-O0-NEXT:    movq 64(%rbx), %rcx
 ; MSVC-X64-O0-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    subq %rcx, %rax
+; MSVC-X64-O0-NEXT:    jne .LBB2_2
+; MSVC-X64-O0-NEXT:    jmp .LBB2_1
+; MSVC-X64-O0-NEXT:  .LBB2_2:
+; MSVC-X64-O0-NEXT:    movq 64(%rbx), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rcx
 ; MSVC-X64-O0-NEXT:    subq $32, %rsp
 ; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:  .LBB2_1:
 ; MSVC-X64-O0-NEXT:    leaq 8(%rbp), %rsp
 ; MSVC-X64-O0-NEXT:    popq %rbx
 ; MSVC-X64-O0-NEXT:    popq %rbp
@@ -377,3 +451,4 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
 declare ptr @strcpy(ptr, ptr) nounwind
 
 declare i32 @printf(ptr, ...) nounwind
+

diff  --git a/llvm/test/CodeGen/X86/tailcc-ssp.ll b/llvm/test/CodeGen/X86/tailcc-ssp.ll
index 7ea5dd49f0242..ac5dda7d69bde 100644
--- a/llvm/test/CodeGen/X86/tailcc-ssp.ll
+++ b/llvm/test/CodeGen/X86/tailcc-ssp.ll
@@ -13,9 +13,10 @@ define tailcc void @tailcall_frame(ptr %0, i64 %1) sspreq {
 ; WINDOWS-NEXT:    movq __security_cookie(%rip), %rax
 ; WINDOWS-NEXT:    xorq %rsp, %rax
 ; WINDOWS-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
-; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
-; WINDOWS-NEXT:    xorq %rsp, %rcx
-; WINDOWS-NEXT:    cmpq __security_cookie(%rip), %rcx
+; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; WINDOWS-NEXT:    xorq %rsp, %rax
+; WINDOWS-NEXT:    movq __security_cookie(%rip), %rcx
+; WINDOWS-NEXT:    cmpq %rax, %rcx
 ; WINDOWS-NEXT:    jne .LBB0_1
 ; WINDOWS-NEXT:  # %bb.2:
 ; WINDOWS-NEXT:    xorl %ecx, %ecx
@@ -26,6 +27,8 @@ define tailcc void @tailcall_frame(ptr %0, i64 %1) sspreq {
 ; WINDOWS-NEXT:    .seh_endepilogue
 ; WINDOWS-NEXT:    jmp h # TAILCALL
 ; WINDOWS-NEXT:  .LBB0_1:
+; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; WINDOWS-NEXT:    xorq %rsp, %rcx
 ; WINDOWS-NEXT:    callq __security_check_cookie
 ; WINDOWS-NEXT:    int3
 ; WINDOWS-NEXT:    .seh_endproc
@@ -49,7 +52,6 @@ define tailcc void @tailcall_frame(ptr %0, i64 %1) sspreq {
 ; LINUX-NEXT:  .LBB0_2: # %CallStackCheckFailBlk
 ; LINUX-NEXT:    .cfi_def_cfa_offset 32
 ; LINUX-NEXT:    callq __stack_chk_fail@PLT
-
    tail call tailcc void @h(ptr null, i64 0, ptr null)
    ret void
 }
@@ -65,9 +67,10 @@ define void @tailcall_unrelated_frame() sspreq {
 ; WINDOWS-NEXT:    xorq %rsp, %rax
 ; WINDOWS-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
 ; WINDOWS-NEXT:    callq bar
-; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
-; WINDOWS-NEXT:    xorq %rsp, %rcx
-; WINDOWS-NEXT:    cmpq __security_cookie(%rip), %rcx
+; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; WINDOWS-NEXT:    xorq %rsp, %rax
+; WINDOWS-NEXT:    movq __security_cookie(%rip), %rcx
+; WINDOWS-NEXT:    cmpq %rax, %rcx
 ; WINDOWS-NEXT:    jne .LBB1_1
 ; WINDOWS-NEXT:  # %bb.2:
 ; WINDOWS-NEXT:    .seh_startepilogue
@@ -75,6 +78,8 @@ define void @tailcall_unrelated_frame() sspreq {
 ; WINDOWS-NEXT:    .seh_endepilogue
 ; WINDOWS-NEXT:    jmp bar # TAILCALL
 ; WINDOWS-NEXT:  .LBB1_1:
+; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; WINDOWS-NEXT:    xorq %rsp, %rcx
 ; WINDOWS-NEXT:    callq __security_check_cookie
 ; WINDOWS-NEXT:    int3
 ; WINDOWS-NEXT:    .seh_endproc
@@ -115,9 +120,10 @@ define void @caller() sspreq {
 ; WINDOWS-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
 ; WINDOWS-NEXT:    callq callee
 ; WINDOWS-NEXT:    callq callee
-; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
-; WINDOWS-NEXT:    xorq %rsp, %rcx
-; WINDOWS-NEXT:    cmpq __security_cookie(%rip), %rcx
+; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; WINDOWS-NEXT:    xorq %rsp, %rax
+; WINDOWS-NEXT:    movq __security_cookie(%rip), %rcx
+; WINDOWS-NEXT:    cmpq %rax, %rcx
 ; WINDOWS-NEXT:    jne .LBB2_2
 ; WINDOWS-NEXT:  # %bb.1:
 ; WINDOWS-NEXT:    .seh_startepilogue
@@ -125,6 +131,8 @@ define void @caller() sspreq {
 ; WINDOWS-NEXT:    .seh_endepilogue
 ; WINDOWS-NEXT:    retq
 ; WINDOWS-NEXT:  .LBB2_2:
+; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; WINDOWS-NEXT:    xorq %rsp, %rcx
 ; WINDOWS-NEXT:    callq __security_check_cookie
 ; WINDOWS-NEXT:    int3
 ; WINDOWS-NEXT:    .seh_endproc

diff  --git a/llvm/test/DebugInfo/COFF/fpo-stack-protect.ll b/llvm/test/DebugInfo/COFF/fpo-stack-protect.ll
index 566d36e87d2b6..d0d724910faf1 100644
--- a/llvm/test/DebugInfo/COFF/fpo-stack-protect.ll
+++ b/llvm/test/DebugInfo/COFF/fpo-stack-protect.ll
@@ -15,7 +15,7 @@
 ; CHECK:         subl    $20, %esp
 ; CHECK:         .cv_fpo_stackalloc      20
 ; CHECK:         .cv_fpo_endprologue
-; CHECK:         ___security_cookie
+; CHECK:         movl    ___security_cookie, %ea
 
 ; CHECK:         movl    28(%esp), %esi
 ; CHECK:         movl    %esi, {{[0-9]*}}(%esp)
@@ -24,13 +24,16 @@
 ; CHECK:         movl    %esi, {{[0-9]*}}(%esp)
 
 ; CHECK:         calll   _escape
-; CHECK:         calll   @__security_check_cookie@4
+
+; CHECK:         movl    ___security_cookie, %ecx
+; CHECK:         cmpl    %eax, %ecx
 
 ; CHECK:         movl    %esi, %eax
 ; CHECK:         addl    $20, %esp
 ; CHECK:         popl    %esi
 ; CHECK:         retl
 ; CHECK: Ltmp4:
+; CHECK:         calll   @__security_check_cookie@4
 ; CHECK:         .cv_fpo_endproc
 
 ; ModuleID = 't.c'


        

