[clang] [llvm] [X86][CodeGen] security check cookie executed only when needed (PR #95904)

via llvm-commits llvm-commits@lists.llvm.org
Wed Jul 3 08:09:46 PDT 2024


https://github.com/mahesh-attarde updated https://github.com/llvm/llvm-project/pull/95904

From 6d6619f8f7a37906ac45791487a4d63b51a48ad1 Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde@intel.com>
Date: Wed, 12 Jun 2024 06:15:51 -0700
Subject: [PATCH 01/12] added regcall struct by reg support

---
 clang/lib/CodeGen/Targets/X86.cpp | 20 ++++++++++++
 clang/test/CodeGen/regcall3.c     | 53 +++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+)
 create mode 100644 clang/test/CodeGen/regcall3.c

diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index 43dadf5e724ac..506d106ad65b0 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -148,6 +148,7 @@ class X86_32ABIInfo : public ABIInfo {
 
   Class classify(QualType Ty) const;
   ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
+
   ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State,
                                   unsigned ArgIndex) const;
 
@@ -1306,6 +1307,8 @@ class X86_64ABIInfo : public ABIInfo {
                                            unsigned &NeededSSE,
                                            unsigned &MaxVectorWidth) const;
 
+  bool DoesRegcallStructFitInReg(QualType Ty) const;
+
   bool IsIllegalVectorType(QualType Ty) const;
 
   /// The 0.98 ABI revision clarified a lot of ambiguities,
@@ -2830,6 +2833,20 @@ X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
   return ABIArgInfo::getDirect(ResType);
 }
 
+bool X86_64ABIInfo::DoesRegcallStructFitInReg(QualType Ty) const {
+  auto RT = Ty->castAs<RecordType>();
+  // For the INTEGER class, the maximum GPR size is 64 bits.
+  if (getContext().getTypeSize(Ty) > 64)
+    return false;
+  // The struct must not contain any non-builtin field types.
+  for (const auto *FD : RT->getDecl()->fields()) {
+    QualType MTy = FD->getType();
+    if (!MTy->isBuiltinType())
+      return false;
+  }
+  return true;
+}
+
 ABIArgInfo
 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                              unsigned &NeededSSE,
@@ -2837,6 +2854,9 @@ X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
   auto RT = Ty->getAs<RecordType>();
   assert(RT && "classifyRegCallStructType only valid with struct types");
 
+  if (DoesRegcallStructFitInReg(Ty))
+    return classifyArgumentType(Ty, UINT_MAX, NeededInt, NeededSSE, true, true);
+
   if (RT->getDecl()->hasFlexibleArrayMember())
     return getIndirectReturnResult(Ty);
 
diff --git a/clang/test/CodeGen/regcall3.c b/clang/test/CodeGen/regcall3.c
new file mode 100644
index 0000000000000..1c83407220861
--- /dev/null
+++ b/clang/test/CodeGen/regcall3.c
@@ -0,0 +1,53 @@
+// RUN: %clang_cc1 -S %s -o - -ffreestanding -triple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=LINUX64
+
+#include <xmmintrin.h>
+struct struct1 { int x; int y; };
+void __regcall v6(int a, float b, struct struct1 c) {}
+
+void v6_caller(){
+    struct struct1 c0;
+    c0.x = 0xa0a0; c0.y = 0xb0b0;
+    int x= 0xf0f0, y = 0x0f0f;
+    v6(x,y,c0);
+}
+
+// LINUX64-LABEL: __regcall3__v6
+// LINUX64: movq	%rcx, -8(%rsp)
+// LINUX64: movl	%eax, -12(%rsp)
+// LINUX64: movss	%xmm0, -16(%rsp)
+
+// LINUX64-LABEL: v6_caller
+// LINUX64: movl	$41120, 16(%rsp)                # imm = 0xA0A0
+// LINUX64: movl	$45232, 20(%rsp)                # imm = 0xB0B0
+// LINUX64: movl	$61680, 12(%rsp)                # imm = 0xF0F0
+// LINUX64: movl	$3855, 8(%rsp)                  # imm = 0xF0F
+// LINUX64: movl	12(%rsp), %eax
+// LINUX64: cvtsi2ssl	8(%rsp), %xmm0
+// LINUX64: movq	16(%rsp), %rcx
+// LINUX64: callq	.L__regcall3__v6$local
+
+
+struct struct2 { int x; float y; };
+void __regcall v31(int a, float b, struct struct2 c) {}
+
+void v31_caller(){
+    struct struct2 c0;
+    c0.x = 0xa0a0; c0.y = 0xb0b0;
+    int x= 0xf0f0, y = 0x0f0f;
+    v31(x,y,c0);
+}
+
+// LINUX64: __regcall3__v31:                        # @__regcall3__v31
+// LINUX64: 	movq	%rcx, -8(%rsp)
+// LINUX64: 	movl	%eax, -12(%rsp)
+// LINUX64: 	movss	%xmm0, -16(%rsp)
+// LINUX64: v31_caller:                             # @v31_caller
+// LINUX64: 	movl	$41120, 16(%rsp)                # imm = 0xA0A0
+// LINUX64: 	movss	.LCPI3_0(%rip), %xmm0           # xmm0 = [4.5232E+4,0.0E+0,0.0E+0,0.0E+0]
+// LINUX64: 	movss	%xmm0, 20(%rsp)
+// LINUX64: 	movl	$61680, 12(%rsp)                # imm = 0xF0F0
+// LINUX64: 	movl	$3855, 8(%rsp)                  # imm = 0xF0F
+// LINUX64: 	movl	12(%rsp), %eax
+// LINUX64: 	cvtsi2ssl	8(%rsp), %xmm0
+// LINUX64: 	movq	16(%rsp), %rcx
+// LINUX64: 	callq	.L__regcall3__v31$local

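For illustration, here is a hand-written sketch of the two cases the new
DoesRegcallStructFitInReg predicate separates (the struct and function names
below are made up; the 64-bit limit and the builtin-field rule are the ones
checked in the code above):

  // Sketch only -- mirrors what clang/test/CodeGen/regcall3.c exercises.
  struct fits    { int x; int y; };  // 64 bits, all builtin fields:
                                     // now passed directly in one GPR
  struct too_big { int a[4]; };      // 128 bits: over the 64-bit GPR
                                     // limit, keeps the existing path

  void __regcall f(struct fits s);    // s arrives in a register
                                      // (%rcx in the v6 test above)
  void __regcall g(struct too_big s); // lowering unchanged by this patch
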
From 8bdd245edd8dca9477d6541401737f2aeaf6e820 Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde@intel.com>
Date: Tue, 18 Jun 2024 03:33:02 -0700
Subject: [PATCH 02/12] selectively call security cookie check

---
 llvm/lib/Target/X86/CMakeLists.txt            |   1 +
 llvm/lib/Target/X86/X86.h                     |   4 +
 .../lib/Target/X86/X86FixupStackProtector.cpp | 249 ++++++++++++++++++
 llvm/lib/Target/X86/X86TargetMachine.cpp      |   1 +
 llvm/test/CodeGen/X86/opt-pipeline.ll         |   1 +
 llvm/test/CodeGen/X86/stack-protector-msvc.ll |  22 +-
 llvm/test/CodeGen/X86/tailcc-ssp.ll           |  27 +-
 7 files changed, 296 insertions(+), 9 deletions(-)
 create mode 100644 llvm/lib/Target/X86/X86FixupStackProtector.cpp

diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 44a54c8ec62cb..5303758ff8a2e 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -48,6 +48,7 @@ set(sources
   X86AvoidStoreForwardingBlocks.cpp
   X86DynAllocaExpander.cpp
   X86FixupSetCC.cpp
+  X86FixupStackProtector.cpp
   X86FlagsCopyLowering.cpp
   X86FloatingPoint.cpp
   X86FrameLowering.cpp
diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index fdb9e4cad5e89..b4432f45987cd 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -73,6 +73,9 @@ FunctionPass *createX86OptimizeLEAs();
 /// Return a pass that transforms setcc + movzx pairs into xor + setcc.
 FunctionPass *createX86FixupSetCC();
 
+/// Return a pass that transforms the inline stack protector check into a
+/// separate basic block.
+FunctionPass *createX86FixupStackProtectorPass();
+
 /// Return a pass that avoids creating store forward block issues in the hardware.
 FunctionPass *createX86AvoidStoreForwardingBlocks();
 
@@ -186,6 +189,7 @@ void initializeX86ExpandPseudoPass(PassRegistry &);
 void initializeX86FastPreTileConfigPass(PassRegistry &);
 void initializeX86FastTileConfigPass(PassRegistry &);
 void initializeX86FixupSetCCPassPass(PassRegistry &);
+void initializeX86FixupStackProtectorPassPass(PassRegistry &);
 void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
 void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
 void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
diff --git a/llvm/lib/Target/X86/X86FixupStackProtector.cpp b/llvm/lib/Target/X86/X86FixupStackProtector.cpp
new file mode 100644
index 0000000000000..f1355c62cc2c1
--- /dev/null
+++ b/llvm/lib/Target/X86/X86FixupStackProtector.cpp
@@ -0,0 +1,249 @@
+//===---- X86FixupStackProtector.cpp Fix Stack Protector Call ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// The stack protector implementation inserts a platform-specific callback
+// into the code. On Windows, __security_check_cookie is called on every
+// function return; since it is defined in the runtime DLL, every return pays
+// for a cross-DLL call that usually just compares the cookie and returns.
+// With this fixup, the comparison is done inline and the DLL is called only
+// when the comparison fails.
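+//
+// Illustrative sketch of the rewritten epilogue (hand-written, not generated
+// output; registers are only an example):
+//
+//   before:                          after:
+//     rcx = cookie_slot ^ rsp          rcx = cookie_slot ^ rsp
+//     call __security_check_cookie     rax = __security_cookie
+//     ret                              cmp rcx, rax
+//                                      jne .Lfail
+//                                      ret
+//                                    .Lfail:
+//                                      call __security_check_cookie
+//                                      int3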
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86FrameLowering.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Module.h"
+#include <iterator>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-fixup-spcall"
+
+namespace {
+
+class X86FixupStackProtectorPass : public MachineFunctionPass {
+public:
+  static char ID;
+
+  X86FixupStackProtectorPass() : MachineFunctionPass(ID) {}
+
+  StringRef getPassName() const override { return "X86 Fixup Stack Protector"; }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  std::pair<MachineBasicBlock *, MachineInstr *>
+  getSecurityCheckerBasicBlock(MachineFunction &MF);
+
+  void getGuardCheckSequence(MachineBasicBlock *CurMBB, MachineInstr *CheckCall,
+                             MachineInstr *SeqMI[5]);
+ 
+  void SplitBasicBlock(MachineBasicBlock *CurMBB, MachineBasicBlock *NewRetMBB,
+                       MachineBasicBlock::iterator SplitIt);
+
+  void FinishBlock(MachineBasicBlock *MBB);
+
+  void FinishFunction(MachineBasicBlock *FailMBB, MachineBasicBlock *NewRetMBB);
+
+  std::pair<MachineInstr *, MachineInstr *>
+  CreateFailCheckSequence(MachineBasicBlock *CurMBB, MachineBasicBlock *FailMBB,
+                          MachineInstr *SeqMI[5]);
+};
+} // end anonymous namespace
+
+char X86FixupStackProtectorPass::ID = 0;
+
+INITIALIZE_PASS(X86FixupStackProtectorPass, DEBUG_TYPE, DEBUG_TYPE, false,
+                false)
+
+FunctionPass *llvm::createX86FixupStackProtectorPass() {
+  return new X86FixupStackProtectorPass();
+}
+
+void X86FixupStackProtectorPass::SplitBasicBlock(
+    MachineBasicBlock *CurMBB, MachineBasicBlock *NewRetMBB,
+    MachineBasicBlock::iterator SplitIt) {
+  NewRetMBB->splice(NewRetMBB->end(), CurMBB, SplitIt, CurMBB->end());
+}
+
+std::pair<MachineBasicBlock *, MachineInstr *>
+X86FixupStackProtectorPass::getSecurityCheckerBasicBlock(MachineFunction &MF) {
+  MachineBasicBlock::reverse_iterator RBegin, REnd;
+
+  for (auto &MBB : llvm::reverse(MF)) {
+    for (RBegin = MBB.rbegin(), REnd = MBB.rend(); RBegin != REnd; RBegin++) {
+      auto &MI = *RBegin;
+      if (MI.getOpcode() == X86::CALL64pcrel32 &&
+          MI.getNumExplicitOperands() == 1) {
+        auto MO = MI.getOperand(0);
+        if (MO.isGlobal()) {
+          auto Callee = dyn_cast<Function>(MO.getGlobal());
+          if (Callee && Callee->getName() == "__security_check_cookie") {
+            return std::make_pair(&MBB, &MI);
+          }
+        }
+      }
+    }
+  }
+  return std::make_pair(nullptr, nullptr);
+}
+
+void X86FixupStackProtectorPass::getGuardCheckSequence(
+    MachineBasicBlock *CurMBB, MachineInstr *CheckCall,
+    MachineInstr *SeqMI[5]) {
+
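+  // On return, SeqMI holds the fixed five-instruction check sequence in
+  // program order (a summary of the walk below):
+  //   SeqMI[0] = XOR64_FP / XOR32_FP (the guard recomputation)
+  //   SeqMI[1] = ADJCALLSTACKDOWN64
+  //   SeqMI[2] = COPY of the cookie into the argument register
+  //   SeqMI[3] = CALL __security_check_cookie
+  //   SeqMI[4] = ADJCALLSTACKUP64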
+  MachineBasicBlock::iterator UIt(CheckCall);
+  MachineBasicBlock::reverse_iterator DIt(CheckCall);
+  // The instruction sequence from ADJCALLSTACKDOWN64 to ADJCALLSTACKUP64 is fixed.
+  // ADJCALLSTACKUP64
+  UIt++;
+  SeqMI[4] = &*UIt;
+
+  // CALL __security_check_cookie
+  SeqMI[3] = CheckCall;
+
+  // COPY function slot cookie
+  DIt++;
+  SeqMI[2] = &*DIt;
+
+  // ADJCALLSTACKDOWN64
+  DIt++;
+  SeqMI[1] = &*DIt;
+
+  MachineBasicBlock::reverse_iterator XIt(SeqMI[1]);
+  for (; XIt != CurMBB->rbegin(); XIt++) {
+    auto &CI = *XIt;
+    if ((CI.getOpcode() == X86::XOR64_FP) || (CI.getOpcode() == X86::XOR32_FP))
+      break;
+  }
+  SeqMI[0] = &*XIt;
+}
+
+std::pair<MachineInstr *, MachineInstr *>
+X86FixupStackProtectorPass::CreateFailCheckSequence(MachineBasicBlock *CurMBB,
+                                                    MachineBasicBlock *FailMBB,
+                                                    MachineInstr *SeqMI[5]) {
+
+  auto MF = CurMBB->getParent();
+
+  Module &M = *MF->getFunction().getParent();
+  GlobalVariable *GV = M.getGlobalVariable("__security_cookie");
+  assert(GV && " Security Cookie was not installed!");
+
+  MachineRegisterInfo &MRI = MF->getRegInfo();
+  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+
+  MachineInstr *GuardXor = SeqMI[0];
+  MachineBasicBlock::iterator InsertPt(GuardXor);
+  InsertPt++;
+  unsigned DestReg = MRI.createVirtualRegister(&X86::GR64RegClass);
+  // MOV security_cookie value into register
+  auto CMI =
+      BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::MOV64rm), DestReg)
+          .addReg(X86::RIP)
+          .addImm(1)
+          .addReg(X86::NoRegister)
+          .addGlobalAddress(GV)
+          .addReg(X86::NoRegister);
+
+  // Compare the security cookie with the XORed value; if they differ, we have a violation.
+  BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rr))
+      .addReg(DestReg)
+      .addReg(GuardXor->getOperand(0).getReg());
+
+  BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::JCC_1))
+      .addMBB(FailMBB)
+      .addImm(X86::COND_NE);
+
+  auto JMI = BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::JMP_1));
+
+  return std::make_pair(CMI.getInstr(), JMI.getInstr());
+}
+
+void X86FixupStackProtectorPass::FinishBlock(MachineBasicBlock *MBB) {
+  LivePhysRegs LiveRegs;
+  computeAndAddLiveIns(LiveRegs, *MBB);
+}
+
+void X86FixupStackProtectorPass::FinishFunction(MachineBasicBlock *FailMBB,
+                                                MachineBasicBlock *NewRetMBB) {
+  FailMBB->getParent()->RenumberBlocks();
+  // FailMBB contains the call into the MSVC runtime where
+  // __security_check_cookie is invoked. That function uses regcall and
+  // expects the cookie value from the stack slot (even if it was modified).
+  // Before going further, we recompute the live-ins for this block to make
+  // sure the value is live and provided.
+  FinishBlock(FailMBB);
+  FinishBlock(NewRetMBB);
+}
+
+bool X86FixupStackProtectorPass::runOnMachineFunction(MachineFunction &MF) {
+  bool Changed = false;
+  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+
+  if (!(STI.isTargetWindowsItanium() || STI.isTargetWindowsMSVC()))
+    return Changed;
+
+  // Check whether the security cookie was installed.
+  Module &M = *MF.getFunction().getParent();
+  GlobalVariable *GV = M.getGlobalVariable("__security_cookie");
+  if (!GV)
+    return Changed;
+
+  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+
+  // Find the call to __security_check_cookie, if any.
+  auto [CurMBB, CheckCall] = getSecurityCheckerBasicBlock(MF);
+
+  if (!CheckCall)
+    return Changed;
+
+  MachineBasicBlock *FailMBB = MF.CreateMachineBasicBlock();
+  MachineBasicBlock *NewRetMBB = MF.CreateMachineBasicBlock();
+
+  MF.insert(MF.end(), NewRetMBB);
+  MF.insert(MF.end(), FailMBB);
+
+  MachineInstr *SeqMI[5];
+  getGuardCheckSequence(CurMBB, CheckCall, SeqMI);
+
+  auto FailSeqRange = CreateFailCheckSequence(CurMBB, FailMBB, SeqMI);
+  MachineInstrBuilder JMI(MF, FailSeqRange.second);
+
+  // After inserting JMP_1 we cannot have two terminators in the same
+  // block, so split CurMBB after JMP_1.
+  MachineBasicBlock::iterator SplitIt(SeqMI[4]);
+  SplitIt++;
+  SplitBasicBlock(CurMBB, NewRetMBB, SplitIt);
+
+  // Fill in the failure routine: move the fail-check sequence from CurMBB to FailMBB.
+  MachineBasicBlock::iterator U1It(SeqMI[1]);
+  MachineBasicBlock::iterator U2It(SeqMI[4]);
+  U2It++;
+  FailMBB->splice(FailMBB->end(), CurMBB, U1It, U2It);
+  BuildMI(*FailMBB, FailMBB->end(), DebugLoc(), TII->get(X86::INT3));
+
+  // Move the leftover instructions after ADJCALLSTACKUP64 from the
+  // current basic block into the new return block.
+  JMI.addMBB(NewRetMBB);
+  MachineBasicBlock::iterator SplicePt(JMI.getInstr());
+  SplicePt++;
+  if (SplicePt != CurMBB->end())
+    NewRetMBB->splice(NewRetMBB->end(), CurMBB, SplicePt);
+
+  // Restructure Basic Blocks
+  CurMBB->addSuccessor(NewRetMBB);
+  CurMBB->addSuccessor(FailMBB);
+
+  FinishFunction(FailMBB, NewRetMBB);
+  return true;
+}
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index d4e642c7df9cf..b245e80ad18dc 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -550,6 +550,7 @@ bool X86PassConfig::addPreISel() {
 void X86PassConfig::addPreRegAlloc() {
   if (getOptLevel() != CodeGenOptLevel::None) {
     addPass(&LiveRangeShrinkID);
+    addPass(createX86FixupStackProtectorPass());
     addPass(createX86FixupSetCC());
     addPass(createX86OptimizeLEAs());
     addPass(createX86CallFrameOptimization());
diff --git a/llvm/test/CodeGen/X86/opt-pipeline.ll b/llvm/test/CodeGen/X86/opt-pipeline.ll
index 15c496bfb7f66..631f955ee6cc0 100644
--- a/llvm/test/CodeGen/X86/opt-pipeline.ll
+++ b/llvm/test/CodeGen/X86/opt-pipeline.ll
@@ -119,6 +119,7 @@
 ; CHECK-NEXT:       Peephole Optimizations
 ; CHECK-NEXT:       Remove dead machine instructions
 ; CHECK-NEXT:       Live Range Shrink
+; CHECK-NEXT:       X86 Fixup Stack Protector
 ; CHECK-NEXT:       X86 Fixup SetCC
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       X86 LEA Optimize
diff --git a/llvm/test/CodeGen/X86/stack-protector-msvc.ll b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
index d0b6585f40ffe..f8eb47663fb18 100644
--- a/llvm/test/CodeGen/X86/stack-protector-msvc.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
@@ -38,8 +38,13 @@ return:    ; preds = %entry
 ; MSVC-X64: callq strcpy
 ; MSVC-X64: movq [[SLOT]](%rsp), %rcx
 ; MSVC-X64: xorq %rsp, %rcx
-; MSVC-X64: callq __security_check_cookie
+; MSVC-X64: movq	__security_cookie(%rip), %rax
+; MSVC-X64: cmpq	%rcx, %rax
+; MSVC-X64: jne	.LBB0_2
 ; MSVC-X64: retq
+; MSVC-X64: LBB0_2:
+; MSVC-X64: callq __security_check_cookie
+; MSVC-X64: int3
 
 ; MSVC-X86-O0-LABEL: _test:
 ; MSVC-X86-O0: movl ___security_cookie, %[[REG1:[^ ]*]]
@@ -97,9 +102,13 @@ define void @test_vla(i32 %n) nounwind ssp {
 ; MSVC-X64: callq escape
 ; MSVC-X64: movq [[SLOT]](%rbp), %rcx
 ; MSVC-X64: xorq %rbp, %rcx
-; MSVC-X64: callq __security_check_cookie
+; MSVC-X64:	movq	__security_cookie(%rip), %rax
+; MSVC-X64:	cmpq	%rcx, %rax
+; MSVC-X64:	jne	.LBB1_2
 ; MSVC-X64: retq
-
+; MSVC-X64: LBB1_2
+; MSVC-X64: callq __security_check_cookie
+; MSVC-X64: int3
 
 ; This case is interesting because we address local variables with RBX but XOR
 ; the guard value with RBP. That's fine, either value will do, as long as they
@@ -148,11 +157,14 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
 ; MSVC-X64: callq escape
 ; MSVC-X64: movq [[SLOT]](%rbx), %rcx
 ; MSVC-X64: xorq %rbp, %rcx
+; MSVC-X64: movq	__security_cookie(%rip), %rax
+; MSVC-X64: cmpq	%rcx, %rax
+; MSVC-X64: jne	.LBB2_2
+; MSVC-X64: retq 
 ; MSVC-X64: callq __security_check_cookie
-; MSVC-X64: retq
+; MSVC-X64: int3
 
 
 declare ptr @strcpy(ptr, ptr) nounwind
 
 declare i32 @printf(ptr, ...) nounwind
-
diff --git a/llvm/test/CodeGen/X86/tailcc-ssp.ll b/llvm/test/CodeGen/X86/tailcc-ssp.ll
index bb9b4429c0761..ad9f4b9d6a4b6 100644
--- a/llvm/test/CodeGen/X86/tailcc-ssp.ll
+++ b/llvm/test/CodeGen/X86/tailcc-ssp.ll
@@ -5,9 +5,20 @@ declare void @h(ptr, i64, ptr)
 
 define tailcc void @tailcall_frame(ptr %0, i64 %1) sspreq {
 ; WINDOWS-LABEL: tailcall_frame:
-; WINDOWS: callq __security_check_cookie
+; WINDOWS: movq	__security_cookie(%rip), %rax
+; WINDOWS: xorq	%rsp, %rax
+; WINDOWS: movq	%rax, {{[0-9]*}}(%rsp)
+; WINDOWS: movq	 {{[0-9]*}}(%rsp), %rcx
+; WINDOWS: xorq	%rsp, %rcx
+; WINDOWS: movq	__security_cookie(%rip), %rax
+; WINDOWS: cmpq	%rcx, %rax
+; WINDOWS: jne	.LBB0_1
 ; WINDOWS: xorl %ecx, %ecx
 ; WINDOWS: jmp h
+; WINDOWS: .LBB0_1
+; WINDOWS: callq __security_check_cookie
+; WINDOWS: int3
+
 
 ; LINUX-LABEL: tailcall_frame:
 ; LINUX: jne
@@ -22,10 +33,18 @@ declare void @bar()
 define void @tailcall_unrelated_frame() sspreq {
 ; WINDOWS-LABEL: tailcall_unrelated_frame:
 ; WINDOWS: subq [[STACK:\$.*]], %rsp
+; WINDOWS: movq	__security_cookie(%rip), %rax
+; WINDOWS: xorq	%rsp, %rax
 ; WINDOWS: callq bar
-; WINDOWS: callq __security_check_cookie
-; WINDOWS: addq [[STACK]], %rsp
-; WINDOWS: jmp bar
+; WINDOWS: movq	 {{[0-9]*}}(%rsp), %rcx
+; WINDOWS: xorq	%rsp, %rcx
+; WINDOWS: movq	__security_cookie(%rip), %rax
+; WINDOWS: cmpq	%rcx, %rax
+; WINDOWS: jne	.LBB1_1
+; WINDOWS: jmp	bar
+; WINDOWS: .LBB1_1
+; WINDOWS: callq	__security_check_cookie
+; WINDOWS: int3
 
 ; LINUX-LABEL: tailcall_unrelated_frame:
 ; LINUX: callq bar

From 116978cb3ae8e69077da121f5b85464de5435ad7 Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde@intel.com>
Date: Tue, 18 Jun 2024 08:03:06 -0700
Subject: [PATCH 03/12] Revert "added regcall struct by reg support"

This reverts commit 6d6619f8f7a37906ac45791487a4d63b51a48ad1.
---
 clang/lib/CodeGen/Targets/X86.cpp | 20 ------------
 clang/test/CodeGen/regcall3.c     | 53 -------------------------------
 2 files changed, 73 deletions(-)
 delete mode 100644 clang/test/CodeGen/regcall3.c

diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index 506d106ad65b0..43dadf5e724ac 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -148,7 +148,6 @@ class X86_32ABIInfo : public ABIInfo {
 
   Class classify(QualType Ty) const;
   ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
-
   ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State,
                                   unsigned ArgIndex) const;
 
@@ -1307,8 +1306,6 @@ class X86_64ABIInfo : public ABIInfo {
                                            unsigned &NeededSSE,
                                            unsigned &MaxVectorWidth) const;
 
-  bool DoesRegcallStructFitInReg(QualType Ty) const;
-
   bool IsIllegalVectorType(QualType Ty) const;
 
   /// The 0.98 ABI revision clarified a lot of ambiguities,
@@ -2833,20 +2830,6 @@ X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
   return ABIArgInfo::getDirect(ResType);
 }
 
-bool X86_64ABIInfo::DoesRegcallStructFitInReg(QualType Ty) const {
-  auto RT = Ty->castAs<RecordType>();
-  // For the INTEGER class, the maximum GPR size is 64 bits.
-  if (getContext().getTypeSize(Ty) > 64)
-    return false;
-  // The struct must not contain any non-builtin field types.
-  for (const auto *FD : RT->getDecl()->fields()) {
-    QualType MTy = FD->getType();
-    if (!MTy->isBuiltinType())
-      return false;
-  }
-  return true;
-}
-
 ABIArgInfo
 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                              unsigned &NeededSSE,
@@ -2854,9 +2837,6 @@ X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
   auto RT = Ty->getAs<RecordType>();
   assert(RT && "classifyRegCallStructType only valid with struct types");
 
-  if (DoesRegcallStructFitInReg(Ty))
-    return classifyArgumentType(Ty, UINT_MAX, NeededInt, NeededSSE, true, true);
-
   if (RT->getDecl()->hasFlexibleArrayMember())
     return getIndirectReturnResult(Ty);
 
diff --git a/clang/test/CodeGen/regcall3.c b/clang/test/CodeGen/regcall3.c
deleted file mode 100644
index 1c83407220861..0000000000000
--- a/clang/test/CodeGen/regcall3.c
+++ /dev/null
@@ -1,53 +0,0 @@
-// RUN: %clang_cc1 -S %s -o - -ffreestanding -triple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=LINUX64
-
-#include <xmmintrin.h>
-struct struct1 { int x; int y; };
-void __regcall v6(int a, float b, struct struct1 c) {}
-
-void v6_caller(){
-    struct struct1 c0;
-    c0.x = 0xa0a0; c0.y = 0xb0b0;
-    int x= 0xf0f0, y = 0x0f0f;
-    v6(x,y,c0);
-}
-
-// LINUX64-LABEL: __regcall3__v6
-// LINUX64: movq	%rcx, -8(%rsp)
-// LINUX64: movl	%eax, -12(%rsp)
-// LINUX64: movss	%xmm0, -16(%rsp)
-
-// LINUX64-LABEL: v6_caller
-// LINUX64: movl	$41120, 16(%rsp)                # imm = 0xA0A0
-// LINUX64: movl	$45232, 20(%rsp)                # imm = 0xB0B0
-// LINUX64: movl	$61680, 12(%rsp)                # imm = 0xF0F0
-// LINUX64: movl	$3855, 8(%rsp)                  # imm = 0xF0F
-// LINUX64: movl	12(%rsp), %eax
-// LINUX64: cvtsi2ssl	8(%rsp), %xmm0
-// LINUX64: movq	16(%rsp), %rcx
-// LINUX64: callq	.L__regcall3__v6$local
-
-
-struct struct2 { int x; float y; };
-void __regcall v31(int a, float b, struct struct2 c) {}
-
-void v31_caller(){
-    struct struct2 c0;
-    c0.x = 0xa0a0; c0.y = 0xb0b0;
-    int x= 0xf0f0, y = 0x0f0f;
-    v31(x,y,c0);
-}
-
-// LINUX64: __regcall3__v31:                        # @__regcall3__v31
-// LINUX64: 	movq	%rcx, -8(%rsp)
-// LINUX64: 	movl	%eax, -12(%rsp)
-// LINUX64: 	movss	%xmm0, -16(%rsp)
-// LINUX64: v31_caller:                             # @v31_caller
-// LINUX64: 	movl	$41120, 16(%rsp)                # imm = 0xA0A0
-// LINUX64: 	movss	.LCPI3_0(%rip), %xmm0           # xmm0 = [4.5232E+4,0.0E+0,0.0E+0,0.0E+0]
-// LINUX64: 	movss	%xmm0, 20(%rsp)
-// LINUX64: 	movl	$61680, 12(%rsp)                # imm = 0xF0F0
-// LINUX64: 	movl	$3855, 8(%rsp)                  # imm = 0xF0F
-// LINUX64: 	movl	12(%rsp), %eax
-// LINUX64: 	cvtsi2ssl	8(%rsp), %xmm0
-// LINUX64: 	movq	16(%rsp), %rcx
-// LINUX64: 	callq	.L__regcall3__v31$local

From fbb566b1633150d3e3702f4abc18b53599ccda88 Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde@intel.com>
Date: Tue, 18 Jun 2024 12:26:05 -0700
Subject: [PATCH 04/12] address review comments for mov

---
 .../lib/Target/X86/X86FixupStackProtector.cpp |  20 +-
 llvm/test/CodeGen/X86/stack-protector-msvc.ll | 335 +++++++++++++++++-
 llvm/test/CodeGen/X86/tailcc-ssp.ll           | 114 ++++--
 3 files changed, 420 insertions(+), 49 deletions(-)

diff --git a/llvm/lib/Target/X86/X86FixupStackProtector.cpp b/llvm/lib/Target/X86/X86FixupStackProtector.cpp
index f1355c62cc2c1..d8c31e544d348 100644
--- a/llvm/lib/Target/X86/X86FixupStackProtector.cpp
+++ b/llvm/lib/Target/X86/X86FixupStackProtector.cpp
@@ -138,26 +138,20 @@ X86FixupStackProtectorPass::CreateFailCheckSequence(MachineBasicBlock *CurMBB,
   GlobalVariable *GV = M.getGlobalVariable("__security_cookie");
   assert(GV && " Security Cookie was not installed!");
 
-  MachineRegisterInfo &MRI = MF->getRegInfo();
   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
 
   MachineInstr *GuardXor = SeqMI[0];
   MachineBasicBlock::iterator InsertPt(GuardXor);
   InsertPt++;
-  unsigned DestReg = MRI.createVirtualRegister(&X86::GR64RegClass);
-  // MOV security_cookie value into register
-  auto CMI =
-      BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::MOV64rm), DestReg)
-          .addReg(X86::RIP)
-          .addImm(1)
-          .addReg(X86::NoRegister)
-          .addGlobalAddress(GV)
-          .addReg(X86::NoRegister);
 
   // Compare the security cookie with the XORed value; if they differ, we have a violation.
-  BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rr))
-      .addReg(DestReg)
-      .addReg(GuardXor->getOperand(0).getReg());
+  auto CMI = BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rm))
+                 .addReg(GuardXor->getOperand(0).getReg())
+                 .addReg(X86::RIP)
+                 .addImm(1)
+                 .addReg(X86::NoRegister)
+                 .addGlobalAddress(GV)
+                 .addReg(X86::NoRegister);
 
   BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::JCC_1))
       .addMBB(FailMBB)
diff --git a/llvm/test/CodeGen/X86/stack-protector-msvc.ll b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
index f8eb47663fb18..75f8b613b1b95 100644
--- a/llvm/test/CodeGen/X86/stack-protector-msvc.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=i386-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-X86 %s
 ; RUN: llc -mtriple=x86_64-pc-windows-msvc < %s -o - | FileCheck -check-prefix=MSVC-X64 %s
 
@@ -8,6 +9,100 @@
 @"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00"    ; <ptr> [#uses=1]
 
 define void @test(ptr %a) nounwind ssp {
+; MSVC-X86-LABEL: test:
+; MSVC-X86:       # %bb.0: # %entry
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    subl $12, %esp
+; MSVC-X86-NEXT:    movl ___security_cookie, %eax
+; MSVC-X86-NEXT:    xorl %esp, %eax
+; MSVC-X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; MSVC-X86-NEXT:    movl %esp, %esi
+; MSVC-X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    calll _strcpy
+; MSVC-X86-NEXT:    addl $8, %esp
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    pushl $LC
+; MSVC-X86-NEXT:    calll _printf
+; MSVC-X86-NEXT:    addl $8, %esp
+; MSVC-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-NEXT:    xorl %esp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-NEXT:    addl $12, %esp
+; MSVC-X86-NEXT:    popl %esi
+; MSVC-X86-NEXT:    retl
+;
+; MSVC-X64-LABEL: test:
+; MSVC-X64:       # %bb.0: # %entry
+; MSVC-X64-NEXT:    pushq %rsi
+; MSVC-X64-NEXT:    subq $64, %rsp
+; MSVC-X64-NEXT:    movq %rcx, %rdx
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-NEXT:    xorq %rsp, %rax
+; MSVC-X64-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; MSVC-X64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; MSVC-X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rsi
+; MSVC-X64-NEXT:    movq %rsi, %rcx
+; MSVC-X64-NEXT:    callq strcpy
+; MSVC-X64-NEXT:    leaq LC(%rip), %rcx
+; MSVC-X64-NEXT:    movq %rsi, %rdx
+; MSVC-X64-NEXT:    callq printf
+; MSVC-X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-NEXT:    xorq %rsp, %rcx
+; MSVC-X64-NEXT:    cmpq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    jne .LBB0_2
+; MSVC-X64-NEXT:  # %bb.1:
+; MSVC-X64-NEXT:    addq $64, %rsp
+; MSVC-X64-NEXT:    popq %rsi
+; MSVC-X64-NEXT:    retq
+; MSVC-X64-NEXT:  .LBB0_2:
+; MSVC-X64-NEXT:    callq __security_check_cookie
+; MSVC-X64-NEXT:    int3
+;
+; MSVC-X86-O0-LABEL: test:
+; MSVC-X86-O0:       # %bb.0: # %entry
+; MSVC-X86-O0-NEXT:    subl $20, %esp
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %eax
+; MSVC-X86-O0-NEXT:    xorl %esp, %eax
+; MSVC-X86-O0-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    movl %esp, %eax
+; MSVC-X86-O0-NEXT:    movl %ecx, 4(%eax)
+; MSVC-X86-O0-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    movl %ecx, (%eax)
+; MSVC-X86-O0-NEXT:    calll _strcpy
+; MSVC-X86-O0-NEXT:    leal LC, %ecx
+; MSVC-X86-O0-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; MSVC-X86-O0-NEXT:    movl %ecx, (%esp)
+; MSVC-X86-O0-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; MSVC-X86-O0-NEXT:    calll _printf
+; MSVC-X86-O0-NEXT:  # %bb.1: # %return
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    xorl %esp, %ecx
+; MSVC-X86-O0-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-O0-NEXT:    addl $20, %esp
+; MSVC-X86-O0-NEXT:    retl
+;
+; MSVC-X64-O0-LABEL: test:
+; MSVC-X64-O0:       # %bb.0: # %entry
+; MSVC-X64-O0-NEXT:    subq $56, %rsp
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    xorq %rsp, %rax
+; MSVC-X64-O0-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; MSVC-X64-O0-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; MSVC-X64-O0-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-O0-NEXT:    callq strcpy
+; MSVC-X64-O0-NEXT:    leaq LC(%rip), %rcx
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; MSVC-X64-O0-NEXT:    callq printf
+; MSVC-X64-O0-NEXT:  # %bb.1: # %return
+; MSVC-X64-O0-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rsp, %rcx
+; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:    addq $56, %rsp
+; MSVC-X64-O0-NEXT:    retq
 entry:
  %a_addr = alloca ptr    ; <ptr> [#uses=2]
  %buf = alloca [8 x i8]    ; <ptr> [#uses=2]
@@ -70,6 +165,107 @@ return:    ; preds = %entry
 declare void @escape(ptr)
 
 define void @test_vla(i32 %n) nounwind ssp {
+; MSVC-X86-LABEL: test_vla:
+; MSVC-X86:       # %bb.0:
+; MSVC-X86-NEXT:    pushl %ebp
+; MSVC-X86-NEXT:    movl %esp, %ebp
+; MSVC-X86-NEXT:    pushl %eax
+; MSVC-X86-NEXT:    movl 8(%ebp), %eax
+; MSVC-X86-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    movl %ecx, -4(%ebp)
+; MSVC-X86-NEXT:    shll $2, %eax
+; MSVC-X86-NEXT:    calll __chkstk
+; MSVC-X86-NEXT:    movl %esp, %eax
+; MSVC-X86-NEXT:    pushl %eax
+; MSVC-X86-NEXT:    calll _escape
+; MSVC-X86-NEXT:    addl $4, %esp
+; MSVC-X86-NEXT:    movl -4(%ebp), %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-NEXT:    movl %ebp, %esp
+; MSVC-X86-NEXT:    popl %ebp
+; MSVC-X86-NEXT:    retl
+;
+; MSVC-X64-LABEL: test_vla:
+; MSVC-X64:       # %bb.0:
+; MSVC-X64-NEXT:    pushq %rbp
+; MSVC-X64-NEXT:    subq $16, %rsp
+; MSVC-X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-NEXT:    xorq %rbp, %rax
+; MSVC-X64-NEXT:    movq %rax, -8(%rbp)
+; MSVC-X64-NEXT:    movl %ecx, %eax
+; MSVC-X64-NEXT:    leaq 15(,%rax,4), %rax
+; MSVC-X64-NEXT:    andq $-16, %rax
+; MSVC-X64-NEXT:    callq __chkstk
+; MSVC-X64-NEXT:    subq %rax, %rsp
+; MSVC-X64-NEXT:    movq %rsp, %rcx
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    callq escape
+; MSVC-X64-NEXT:    addq $32, %rsp
+; MSVC-X64-NEXT:    movq -8(%rbp), %rcx
+; MSVC-X64-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-NEXT:    cmpq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    jne .LBB1_2
+; MSVC-X64-NEXT:  # %bb.1:
+; MSVC-X64-NEXT:    movq %rbp, %rsp
+; MSVC-X64-NEXT:    popq %rbp
+; MSVC-X64-NEXT:    retq
+; MSVC-X64-NEXT:  .LBB1_2:
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    callq __security_check_cookie
+; MSVC-X64-NEXT:    addq $32, %rsp
+; MSVC-X64-NEXT:    int3
+;
+; MSVC-X86-O0-LABEL: test_vla:
+; MSVC-X86-O0:       # %bb.0:
+; MSVC-X86-O0-NEXT:    pushl %ebp
+; MSVC-X86-O0-NEXT:    movl %esp, %ebp
+; MSVC-X86-O0-NEXT:    pushl %eax
+; MSVC-X86-O0-NEXT:    movl 8(%ebp), %eax
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    movl %ecx, -4(%ebp)
+; MSVC-X86-O0-NEXT:    shll $2, %eax
+; MSVC-X86-O0-NEXT:    calll __chkstk
+; MSVC-X86-O0-NEXT:    movl %esp, %eax
+; MSVC-X86-O0-NEXT:    subl $4, %esp
+; MSVC-X86-O0-NEXT:    movl %eax, (%esp)
+; MSVC-X86-O0-NEXT:    calll _escape
+; MSVC-X86-O0-NEXT:    addl $4, %esp
+; MSVC-X86-O0-NEXT:    movl -4(%ebp), %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-O0-NEXT:    movl %ebp, %esp
+; MSVC-X86-O0-NEXT:    popl %ebp
+; MSVC-X86-O0-NEXT:    retl
+;
+; MSVC-X64-O0-LABEL: test_vla:
+; MSVC-X64-O0:       # %bb.0:
+; MSVC-X64-O0-NEXT:    pushq %rbp
+; MSVC-X64-O0-NEXT:    subq $16, %rsp
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rax
+; MSVC-X64-O0-NEXT:    movq %rax, -8(%rbp)
+; MSVC-X64-O0-NEXT:    movl %ecx, %eax
+; MSVC-X64-O0-NEXT:    # kill: def $rax killed $eax
+; MSVC-X64-O0-NEXT:    leaq 15(,%rax,4), %rax
+; MSVC-X64-O0-NEXT:    andq $-16, %rax
+; MSVC-X64-O0-NEXT:    callq __chkstk
+; MSVC-X64-O0-NEXT:    subq %rax, %rsp
+; MSVC-X64-O0-NEXT:    movq %rsp, %rcx
+; MSVC-X64-O0-NEXT:    subq $32, %rsp
+; MSVC-X64-O0-NEXT:    callq escape
+; MSVC-X64-O0-NEXT:    addq $32, %rsp
+; MSVC-X64-O0-NEXT:    movq -8(%rbp), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-O0-NEXT:    subq $32, %rsp
+; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:    movq %rbp, %rsp
+; MSVC-X64-O0-NEXT:    popq %rbp
+; MSVC-X64-O0-NEXT:    retq
   %vla = alloca i32, i32 %n
   call void @escape(ptr %vla)
   ret void
@@ -115,6 +311,143 @@ define void @test_vla(i32 %n) nounwind ssp {
 ; are the same across the life of the frame.
 
 define void @test_vla_realign(i32 %n) nounwind ssp {
+; MSVC-X86-LABEL: test_vla_realign:
+; MSVC-X86:       # %bb.0:
+; MSVC-X86-NEXT:    pushl %ebp
+; MSVC-X86-NEXT:    movl %esp, %ebp
+; MSVC-X86-NEXT:    pushl %edi
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    andl $-32, %esp
+; MSVC-X86-NEXT:    subl $32, %esp
+; MSVC-X86-NEXT:    movl %esp, %esi
+; MSVC-X86-NEXT:    movl 8(%ebp), %eax
+; MSVC-X86-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    movl %ecx, 12(%esi)
+; MSVC-X86-NEXT:    shll $2, %eax
+; MSVC-X86-NEXT:    calll __chkstk
+; MSVC-X86-NEXT:    movl %esp, %edi
+; MSVC-X86-NEXT:    movl %esi, %eax
+; MSVC-X86-NEXT:    pushl %eax
+; MSVC-X86-NEXT:    calll _escape
+; MSVC-X86-NEXT:    addl $4, %esp
+; MSVC-X86-NEXT:    pushl %edi
+; MSVC-X86-NEXT:    calll _escape
+; MSVC-X86-NEXT:    addl $4, %esp
+; MSVC-X86-NEXT:    movl 12(%esi), %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-NEXT:    leal -8(%ebp), %esp
+; MSVC-X86-NEXT:    popl %esi
+; MSVC-X86-NEXT:    popl %edi
+; MSVC-X86-NEXT:    popl %ebp
+; MSVC-X86-NEXT:    retl
+;
+; MSVC-X64-LABEL: test_vla_realign:
+; MSVC-X64:       # %bb.0:
+; MSVC-X64-NEXT:    pushq %rbp
+; MSVC-X64-NEXT:    pushq %rsi
+; MSVC-X64-NEXT:    pushq %rbx
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; MSVC-X64-NEXT:    andq $-32, %rsp
+; MSVC-X64-NEXT:    movq %rsp, %rbx
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-NEXT:    xorq %rbp, %rax
+; MSVC-X64-NEXT:    movq %rax, 24(%rbx)
+; MSVC-X64-NEXT:    movl %ecx, %eax
+; MSVC-X64-NEXT:    leaq 15(,%rax,4), %rax
+; MSVC-X64-NEXT:    andq $-16, %rax
+; MSVC-X64-NEXT:    callq __chkstk
+; MSVC-X64-NEXT:    subq %rax, %rsp
+; MSVC-X64-NEXT:    movq %rsp, %rsi
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    movq %rbx, %rcx
+; MSVC-X64-NEXT:    callq escape
+; MSVC-X64-NEXT:    movq %rsi, %rcx
+; MSVC-X64-NEXT:    callq escape
+; MSVC-X64-NEXT:    addq $32, %rsp
+; MSVC-X64-NEXT:    movq 24(%rbx), %rcx
+; MSVC-X64-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-NEXT:    cmpq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    jne .LBB2_2
+; MSVC-X64-NEXT:  # %bb.1:
+; MSVC-X64-NEXT:    movq %rbp, %rsp
+; MSVC-X64-NEXT:    popq %rbx
+; MSVC-X64-NEXT:    popq %rsi
+; MSVC-X64-NEXT:    popq %rbp
+; MSVC-X64-NEXT:    retq
+; MSVC-X64-NEXT:  .LBB2_2:
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    callq __security_check_cookie
+; MSVC-X64-NEXT:    addq $32, %rsp
+; MSVC-X64-NEXT:    int3
+;
+; MSVC-X86-O0-LABEL: test_vla_realign:
+; MSVC-X86-O0:       # %bb.0:
+; MSVC-X86-O0-NEXT:    pushl %ebp
+; MSVC-X86-O0-NEXT:    movl %esp, %ebp
+; MSVC-X86-O0-NEXT:    pushl %esi
+; MSVC-X86-O0-NEXT:    andl $-32, %esp
+; MSVC-X86-O0-NEXT:    subl $64, %esp
+; MSVC-X86-O0-NEXT:    movl %esp, %esi
+; MSVC-X86-O0-NEXT:    movl 8(%ebp), %eax
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    movl %ecx, 48(%esi)
+; MSVC-X86-O0-NEXT:    shll $2, %eax
+; MSVC-X86-O0-NEXT:    calll __chkstk
+; MSVC-X86-O0-NEXT:    movl %esp, %eax
+; MSVC-X86-O0-NEXT:    movl %eax, 28(%esi) # 4-byte Spill
+; MSVC-X86-O0-NEXT:    leal 32(%esi), %eax
+; MSVC-X86-O0-NEXT:    subl $4, %esp
+; MSVC-X86-O0-NEXT:    movl %eax, (%esp)
+; MSVC-X86-O0-NEXT:    calll _escape
+; MSVC-X86-O0-NEXT:    movl 28(%esi), %eax # 4-byte Reload
+; MSVC-X86-O0-NEXT:    movl %eax, (%esp)
+; MSVC-X86-O0-NEXT:    calll _escape
+; MSVC-X86-O0-NEXT:    addl $4, %esp
+; MSVC-X86-O0-NEXT:    movl 48(%esi), %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-O0-NEXT:    leal -4(%ebp), %esp
+; MSVC-X86-O0-NEXT:    popl %esi
+; MSVC-X86-O0-NEXT:    popl %ebp
+; MSVC-X86-O0-NEXT:    retl
+;
+; MSVC-X64-O0-LABEL: test_vla_realign:
+; MSVC-X64-O0:       # %bb.0:
+; MSVC-X64-O0-NEXT:    pushq %rbp
+; MSVC-X64-O0-NEXT:    pushq %rbx
+; MSVC-X64-O0-NEXT:    subq $72, %rsp
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; MSVC-X64-O0-NEXT:    andq $-32, %rsp
+; MSVC-X64-O0-NEXT:    movq %rsp, %rbx
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rax
+; MSVC-X64-O0-NEXT:    movq %rax, 64(%rbx)
+; MSVC-X64-O0-NEXT:    movl %ecx, %eax
+; MSVC-X64-O0-NEXT:    # kill: def $rax killed $eax
+; MSVC-X64-O0-NEXT:    leaq 15(,%rax,4), %rax
+; MSVC-X64-O0-NEXT:    andq $-16, %rax
+; MSVC-X64-O0-NEXT:    callq __chkstk
+; MSVC-X64-O0-NEXT:    subq %rax, %rsp
+; MSVC-X64-O0-NEXT:    movq %rsp, %rax
+; MSVC-X64-O0-NEXT:    movq %rax, 24(%rbx) # 8-byte Spill
+; MSVC-X64-O0-NEXT:    leaq 32(%rbx), %rcx
+; MSVC-X64-O0-NEXT:    subq $32, %rsp
+; MSVC-X64-O0-NEXT:    callq escape
+; MSVC-X64-O0-NEXT:    movq 24(%rbx), %rcx # 8-byte Reload
+; MSVC-X64-O0-NEXT:    callq escape
+; MSVC-X64-O0-NEXT:    addq $32, %rsp
+; MSVC-X64-O0-NEXT:    movq 64(%rbx), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-O0-NEXT:    subq $32, %rsp
+; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:    leaq 8(%rbp), %rsp
+; MSVC-X64-O0-NEXT:    popq %rbx
+; MSVC-X64-O0-NEXT:    popq %rbp
+; MSVC-X64-O0-NEXT:    retq
   %realign = alloca i32, align 32
   %vla = alloca i32, i32 %n
   call void @escape(ptr %realign)
@@ -160,7 +493,7 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
 ; MSVC-X64: movq	__security_cookie(%rip), %rax
 ; MSVC-X64: cmpq	%rcx, %rax
 ; MSVC-X64: jne	.LBB2_2
-; MSVC-X64: retq 
+; MSVC-X64: retq
 ; MSVC-X64: callq __security_check_cookie
 ; MSVC-X64: int3
 
diff --git a/llvm/test/CodeGen/X86/tailcc-ssp.ll b/llvm/test/CodeGen/X86/tailcc-ssp.ll
index ad9f4b9d6a4b6..1328e0cfa7124 100644
--- a/llvm/test/CodeGen/X86/tailcc-ssp.ll
+++ b/llvm/test/CodeGen/X86/tailcc-ssp.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=x86_64-windows-msvc %s -o - -verify-machineinstrs | FileCheck %s -check-prefix=WINDOWS
 ; RUN: llc -mtriple=x86_64-linux-gnu    %s -o - -verify-machineinstrs | FileCheck %s -check-prefix=LINUX
 
@@ -5,25 +6,49 @@ declare void @h(ptr, i64, ptr)
 
 define tailcc void @tailcall_frame(ptr %0, i64 %1) sspreq {
 ; WINDOWS-LABEL: tailcall_frame:
-; WINDOWS: movq	__security_cookie(%rip), %rax
-; WINDOWS: xorq	%rsp, %rax
-; WINDOWS: movq	%rax, {{[0-9]*}}(%rsp)
-; WINDOWS: movq	 {{[0-9]*}}(%rsp), %rcx
-; WINDOWS: xorq	%rsp, %rcx
-; WINDOWS: movq	__security_cookie(%rip), %rax
-; WINDOWS: cmpq	%rcx, %rax
-; WINDOWS: jne	.LBB0_1
-; WINDOWS: xorl %ecx, %ecx
-; WINDOWS: jmp h
-; WINDOWS: .LBB0_1
-; WINDOWS: callq __security_check_cookie
-; WINDOWS: int3
+; WINDOWS:       # %bb.0:
+; WINDOWS-NEXT:    subq $56, %rsp
+; WINDOWS-NEXT:    .seh_stackalloc 56
+; WINDOWS-NEXT:    .seh_endprologue
+; WINDOWS-NEXT:    movq __security_cookie(%rip), %rax
+; WINDOWS-NEXT:    xorq %rsp, %rax
+; WINDOWS-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; WINDOWS-NEXT:    xorq %rsp, %rcx
+; WINDOWS-NEXT:    cmpq __security_cookie(%rip), %rcx
+; WINDOWS-NEXT:    jne .LBB0_1
+; WINDOWS-NEXT:  # %bb.2:
+; WINDOWS-NEXT:    xorl %ecx, %ecx
+; WINDOWS-NEXT:    xorl %edx, %edx
+; WINDOWS-NEXT:    xorl %r8d, %r8d
+; WINDOWS-NEXT:    addq $56, %rsp
+; WINDOWS-NEXT:    jmp h # TAILCALL
+; WINDOWS-NEXT:  .LBB0_1:
+; WINDOWS-NEXT:    callq __security_check_cookie
+; WINDOWS-NEXT:    int3
+; WINDOWS-NEXT:    .seh_endproc
+;
+; LINUX-LABEL: tailcall_frame:
+; LINUX:       # %bb.0:
+; LINUX-NEXT:    subq $24, %rsp
+; LINUX-NEXT:    .cfi_def_cfa_offset 32
+; LINUX-NEXT:    movq %fs:40, %rax
+; LINUX-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; LINUX-NEXT:    movq %fs:40, %rax
+; LINUX-NEXT:    cmpq {{[0-9]+}}(%rsp), %rax
+; LINUX-NEXT:    jne .LBB0_2
+; LINUX-NEXT:  # %bb.1: # %SP_return
+; LINUX-NEXT:    xorl %edi, %edi
+; LINUX-NEXT:    xorl %esi, %esi
+; LINUX-NEXT:    xorl %edx, %edx
+; LINUX-NEXT:    addq $24, %rsp
+; LINUX-NEXT:    .cfi_def_cfa_offset 8
+; LINUX-NEXT:    jmp h@PLT # TAILCALL
+; LINUX-NEXT:  .LBB0_2: # %CallStackCheckFailBlk
+; LINUX-NEXT:    .cfi_def_cfa_offset 32
+; LINUX-NEXT:    callq __stack_chk_fail@PLT
 
 
-; LINUX-LABEL: tailcall_frame:
-; LINUX: jne
-; LINUX: jmp h
-; LINUX: callq __stack_chk_fail
 
    tail call tailcc void @h(ptr null, i64 0, ptr null)
    ret void
@@ -32,25 +57,44 @@ define tailcc void @tailcall_frame(ptr %0, i64 %1) sspreq {
 declare void @bar()
 define void @tailcall_unrelated_frame() sspreq {
 ; WINDOWS-LABEL: tailcall_unrelated_frame:
-; WINDOWS: subq [[STACK:\$.*]], %rsp
-; WINDOWS: movq	__security_cookie(%rip), %rax
-; WINDOWS: xorq	%rsp, %rax
-; WINDOWS: callq bar
-; WINDOWS: movq	 {{[0-9]*}}(%rsp), %rcx
-; WINDOWS: xorq	%rsp, %rcx
-; WINDOWS: movq	__security_cookie(%rip), %rax
-; WINDOWS: cmpq	%rcx, %rax
-; WINDOWS: jne	.LBB1_1
-; WINDOWS: jmp	bar
-; WINDOWS: .LBB1_1
-; WINDOWS: callq	__security_check_cookie
-; WINDOWS: int3
-
+; WINDOWS:       # %bb.0:
+; WINDOWS-NEXT:    subq $40, %rsp
+; WINDOWS-NEXT:    .seh_stackalloc 40
+; WINDOWS-NEXT:    .seh_endprologue
+; WINDOWS-NEXT:    movq __security_cookie(%rip), %rax
+; WINDOWS-NEXT:    xorq %rsp, %rax
+; WINDOWS-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; WINDOWS-NEXT:    callq bar
+; WINDOWS-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; WINDOWS-NEXT:    xorq %rsp, %rcx
+; WINDOWS-NEXT:    cmpq __security_cookie(%rip), %rcx
+; WINDOWS-NEXT:    jne .LBB1_1
+; WINDOWS-NEXT:  # %bb.2:
+; WINDOWS-NEXT:    addq $40, %rsp
+; WINDOWS-NEXT:    jmp bar # TAILCALL
+; WINDOWS-NEXT:  .LBB1_1:
+; WINDOWS-NEXT:    callq __security_check_cookie
+; WINDOWS-NEXT:    int3
+; WINDOWS-NEXT:    .seh_endproc
+;
 ; LINUX-LABEL: tailcall_unrelated_frame:
-; LINUX: callq bar
-; LINUX: jne
-; LINUX: jmp bar
-; LINUX: callq __stack_chk_fail
+; LINUX:       # %bb.0:
+; LINUX-NEXT:    pushq %rax
+; LINUX-NEXT:    .cfi_def_cfa_offset 16
+; LINUX-NEXT:    movq %fs:40, %rax
+; LINUX-NEXT:    movq %rax, (%rsp)
+; LINUX-NEXT:    callq bar@PLT
+; LINUX-NEXT:    movq %fs:40, %rax
+; LINUX-NEXT:    cmpq (%rsp), %rax
+; LINUX-NEXT:    jne .LBB1_2
+; LINUX-NEXT:  # %bb.1: # %SP_return
+; LINUX-NEXT:    popq %rax
+; LINUX-NEXT:    .cfi_def_cfa_offset 8
+; LINUX-NEXT:    jmp bar@PLT # TAILCALL
+; LINUX-NEXT:  .LBB1_2: # %CallStackCheckFailBlk
+; LINUX-NEXT:    .cfi_def_cfa_offset 16
+; LINUX-NEXT:    callq __stack_chk_fail@PLT
+
 
   call void @bar()
   tail call void @bar()

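For reference, the folded compare above uses the standard five x86
memory-operand fields (base register, scale, index, displacement, segment).
Annotated, the BuildMI call this patch introduces reads as follows (same
CurMBB, InsertPt, TII, GuardXor, and GV as in CreateFailCheckSequence):

  // Emits: cmpq __security_cookie(%rip), %<guard-xor register>
  auto CMI = BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rm))
                 .addReg(GuardXor->getOperand(0).getReg()) // value to check
                 .addReg(X86::RIP)         // base: RIP-relative addressing
                 .addImm(1)                // scale
                 .addReg(X86::NoRegister)  // no index register
                 .addGlobalAddress(GV)     // displacement: __security_cookie
                 .addReg(X86::NoRegister); // no segment register
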
From 2c942b15e14775eff186323c40d70242e237ea9e Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde@intel.com>
Date: Thu, 20 Jun 2024 03:45:14 -0700
Subject: [PATCH 05/12] fix check lines overwritten by update_llc_test_checks

---
 llvm/test/CodeGen/X86/stack-protector-msvc.ll | 122 ------------------
 1 file changed, 122 deletions(-)

diff --git a/llvm/test/CodeGen/X86/stack-protector-msvc.ll b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
index 75f8b613b1b95..a70a30bcd8c76 100644
--- a/llvm/test/CodeGen/X86/stack-protector-msvc.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
@@ -116,52 +116,6 @@ return:    ; preds = %entry
  ret void
 }
 
-; MSVC-X86-LABEL: _test:
-; MSVC-X86: movl ___security_cookie, %[[REG1:[^ ]*]]
-; MSVC-X86: xorl %esp, %[[REG1]]
-; MSVC-X86: movl %[[REG1]], [[SLOT:[0-9]*]](%esp)
-; MSVC-X86: calll _strcpy
-; MSVC-X86: movl [[SLOT]](%esp), %ecx
-; MSVC-X86: xorl %esp, %ecx
-; MSVC-X86: calll @__security_check_cookie@4
-; MSVC-X86: retl
-
-; MSVC-X64-LABEL: test:
-; MSVC-X64: movq __security_cookie(%rip), %[[REG1:[^ ]*]]
-; MSVC-X64: xorq %rsp, %[[REG1]]
-; MSVC-X64: movq %[[REG1]], [[SLOT:[0-9]*]](%rsp)
-; MSVC-X64: callq strcpy
-; MSVC-X64: movq [[SLOT]](%rsp), %rcx
-; MSVC-X64: xorq %rsp, %rcx
-; MSVC-X64: movq	__security_cookie(%rip), %rax
-; MSVC-X64: cmpq	%rcx, %rax
-; MSVC-X64: jne	.LBB0_2
-; MSVC-X64: retq
-; MSVC-X64: LBB0_2:
-; MSVC-X64: callq __security_check_cookie
-; MSVC-X64: int3
-
-; MSVC-X86-O0-LABEL: _test:
-; MSVC-X86-O0: movl ___security_cookie, %[[REG1:[^ ]*]]
-; MSVC-X86-O0: xorl %esp, %[[REG1]]
-; MSVC-X86-O0: movl %[[REG1]], [[SLOT:[0-9]*]](%esp)
-; MSVC-X86-O0: calll _strcpy
-; MSVC-X86-O0: movl [[SLOT]](%esp), %ecx
-; MSVC-X86-O0: xorl %esp, %ecx
-; MSVC-X86-O0: calll @__security_check_cookie@4
-; MSVC-X86-O0: retl
-
-; MSVC-X64-O0-LABEL: test:
-; MSVC-X64-O0: movq __security_cookie(%rip), %[[REG1:[^ ]*]]
-; MSVC-X64-O0: xorq %rsp, %[[REG1]]
-; MSVC-X64-O0: movq %[[REG1]], [[SLOT:[0-9]*]](%rsp)
-; MSVC-X64-O0: callq strcpy
-; MSVC-X64-O0: movq [[SLOT]](%rsp), %rcx
-; MSVC-X64-O0: xorq %rsp, %rcx
-; MSVC-X64-O0: callq __security_check_cookie
-; MSVC-X64-O0: retq
-
-
 declare void @escape(ptr)
 
 define void @test_vla(i32 %n) nounwind ssp {
@@ -271,40 +225,6 @@ define void @test_vla(i32 %n) nounwind ssp {
   ret void
 }
 
-; MSVC-X86-LABEL: _test_vla:
-; MSVC-X86: pushl %ebp
-; MSVC-X86: movl %esp, %ebp
-; MSVC-X86: movl ___security_cookie, %[[REG1:[^ ]*]]
-; MSVC-X86: xorl %ebp, %[[REG1]]
-; MSVC-X86: movl %[[REG1]], [[SLOT:-[0-9]*]](%ebp)
-; MSVC-X86: calll __chkstk
-; MSVC-X86: pushl
-; MSVC-X86: calll _escape
-; MSVC-X86: movl [[SLOT]](%ebp), %ecx
-; MSVC-X86: xorl %ebp, %ecx
-; MSVC-X86: calll @__security_check_cookie@4
-; MSVC-X86: movl %ebp, %esp
-; MSVC-X86: popl %ebp
-; MSVC-X86: retl
-
-; MSVC-X64-LABEL: test_vla:
-; MSVC-X64: pushq %rbp
-; MSVC-X64: subq $16, %rsp
-; MSVC-X64: leaq 16(%rsp), %rbp
-; MSVC-X64: movq __security_cookie(%rip), %[[REG1:[^ ]*]]
-; MSVC-X64: xorq %rbp, %[[REG1]]
-; MSVC-X64: movq %[[REG1]], [[SLOT:-[0-9]*]](%rbp)
-; MSVC-X64: callq __chkstk
-; MSVC-X64: callq escape
-; MSVC-X64: movq [[SLOT]](%rbp), %rcx
-; MSVC-X64: xorq %rbp, %rcx
-; MSVC-X64:	movq	__security_cookie(%rip), %rax
-; MSVC-X64:	cmpq	%rcx, %rax
-; MSVC-X64:	jne	.LBB1_2
-; MSVC-X64: retq
-; MSVC-X64: LBB1_2
-; MSVC-X64: callq __security_check_cookie
-; MSVC-X64: int3
 
 ; This case is interesting because we address local variables with RBX but XOR
 ; the guard value with RBP. That's fine, either value will do, as long as they
@@ -455,48 +375,6 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
   ret void
 }
 
-; MSVC-X86-LABEL: _test_vla_realign:
-; MSVC-X86: pushl %ebp
-; MSVC-X86: movl %esp, %ebp
-; MSVC-X86: pushl %esi
-; MSVC-X86: andl $-32, %esp
-; MSVC-X86: subl $32, %esp
-; MSVC-X86: movl %esp, %esi
-; MSVC-X86: movl ___security_cookie, %[[REG1:[^ ]*]]
-; MSVC-X86: xorl %ebp, %[[REG1]]
-; MSVC-X86: movl %[[REG1]], [[SLOT:[0-9]*]](%esi)
-; MSVC-X86: calll __chkstk
-; MSVC-X86: pushl
-; MSVC-X86: calll _escape
-; MSVC-X86: movl [[SLOT]](%esi), %ecx
-; MSVC-X86: xorl %ebp, %ecx
-; MSVC-X86: calll @__security_check_cookie@4
-; MSVC-X86: leal -8(%ebp), %esp
-; MSVC-X86: popl %esi
-; MSVC-X86: popl %ebp
-; MSVC-X86: retl
-
-; MSVC-X64-LABEL: test_vla_realign:
-; MSVC-X64: pushq %rbp
-; MSVC-X64: pushq %rbx
-; MSVC-X64: subq $32, %rsp
-; MSVC-X64: leaq 32(%rsp), %rbp
-; MSVC-X64: andq $-32, %rsp
-; MSVC-X64: movq %rsp, %rbx
-; MSVC-X64: movq __security_cookie(%rip), %[[REG1:[^ ]*]]
-; MSVC-X64: xorq %rbp, %[[REG1]]
-; MSVC-X64: movq %[[REG1]], [[SLOT:[0-9]*]](%rbx)
-; MSVC-X64: callq __chkstk
-; MSVC-X64: callq escape
-; MSVC-X64: movq [[SLOT]](%rbx), %rcx
-; MSVC-X64: xorq %rbp, %rcx
-; MSVC-X64: movq	__security_cookie(%rip), %rax
-; MSVC-X64: cmpq	%rcx, %rax
-; MSVC-X64: jne	.LBB2_2
-; MSVC-X64: retq
-; MSVC-X64: callq __security_check_cookie
-; MSVC-X64: int3
-
 
 declare ptr @strcpy(ptr, ptr) nounwind
 

From 06fbe6a8089f0df04077f9a35d775a78f873b780 Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde@intel.com>
Date: Thu, 27 Jun 2024 10:47:48 -0700
Subject: [PATCH 06/12] update testcase broken in merge

---
 llvm/test/CodeGen/X86/stack-protector-msvc.ll | 402 +++++++++++++++---
 1 file changed, 332 insertions(+), 70 deletions(-)

diff --git a/llvm/test/CodeGen/X86/stack-protector-msvc.ll b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
index f03fbeb9d1fe2..d718062d2c485 100644
--- a/llvm/test/CodeGen/X86/stack-protector-msvc.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-msvc.ll
@@ -9,6 +9,100 @@
 @"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00"    ; <ptr> [#uses=1]
 
 define void @test(ptr %a) nounwind ssp {
+; MSVC-X86-LABEL: test:
+; MSVC-X86:       # %bb.0: # %entry
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    subl $12, %esp
+; MSVC-X86-NEXT:    movl ___security_cookie, %eax
+; MSVC-X86-NEXT:    xorl %esp, %eax
+; MSVC-X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; MSVC-X86-NEXT:    movl %esp, %esi
+; MSVC-X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    calll _strcpy
+; MSVC-X86-NEXT:    addl $8, %esp
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    pushl $LC
+; MSVC-X86-NEXT:    calll _printf
+; MSVC-X86-NEXT:    addl $8, %esp
+; MSVC-X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-NEXT:    xorl %esp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-NEXT:    addl $12, %esp
+; MSVC-X86-NEXT:    popl %esi
+; MSVC-X86-NEXT:    retl
+;
+; MSVC-X64-LABEL: test:
+; MSVC-X64:       # %bb.0: # %entry
+; MSVC-X64-NEXT:    pushq %rsi
+; MSVC-X64-NEXT:    subq $64, %rsp
+; MSVC-X64-NEXT:    movq %rcx, %rdx
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-NEXT:    xorq %rsp, %rax
+; MSVC-X64-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; MSVC-X64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; MSVC-X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rsi
+; MSVC-X64-NEXT:    movq %rsi, %rcx
+; MSVC-X64-NEXT:    callq strcpy
+; MSVC-X64-NEXT:    leaq LC(%rip), %rcx
+; MSVC-X64-NEXT:    movq %rsi, %rdx
+; MSVC-X64-NEXT:    callq printf
+; MSVC-X64-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-NEXT:    xorq %rsp, %rcx
+; MSVC-X64-NEXT:    cmpq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    jne .LBB0_2
+; MSVC-X64-NEXT:  # %bb.1:
+; MSVC-X64-NEXT:    addq $64, %rsp
+; MSVC-X64-NEXT:    popq %rsi
+; MSVC-X64-NEXT:    retq
+; MSVC-X64-NEXT:  .LBB0_2:
+; MSVC-X64-NEXT:    callq __security_check_cookie
+; MSVC-X64-NEXT:    int3
+;
+; MSVC-X86-O0-LABEL: test:
+; MSVC-X86-O0:       # %bb.0: # %entry
+; MSVC-X86-O0-NEXT:    subl $20, %esp
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %eax
+; MSVC-X86-O0-NEXT:    xorl %esp, %eax
+; MSVC-X86-O0-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    movl %esp, %eax
+; MSVC-X86-O0-NEXT:    movl %ecx, 4(%eax)
+; MSVC-X86-O0-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    movl %ecx, (%eax)
+; MSVC-X86-O0-NEXT:    calll _strcpy
+; MSVC-X86-O0-NEXT:    leal LC, %ecx
+; MSVC-X86-O0-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; MSVC-X86-O0-NEXT:    movl %ecx, (%esp)
+; MSVC-X86-O0-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; MSVC-X86-O0-NEXT:    calll _printf
+; MSVC-X86-O0-NEXT:  # %bb.1: # %return
+; MSVC-X86-O0-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MSVC-X86-O0-NEXT:    xorl %esp, %ecx
+; MSVC-X86-O0-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-O0-NEXT:    addl $20, %esp
+; MSVC-X86-O0-NEXT:    retl
+;
+; MSVC-X64-O0-LABEL: test:
+; MSVC-X64-O0:       # %bb.0: # %entry
+; MSVC-X64-O0-NEXT:    subq $56, %rsp
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    xorq %rsp, %rax
+; MSVC-X64-O0-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; MSVC-X64-O0-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; MSVC-X64-O0-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-O0-NEXT:    callq strcpy
+; MSVC-X64-O0-NEXT:    leaq LC(%rip), %rcx
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; MSVC-X64-O0-NEXT:    callq printf
+; MSVC-X64-O0-NEXT:  # %bb.1: # %return
+; MSVC-X64-O0-NEXT:    movq {{[0-9]+}}(%rsp), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rsp, %rcx
+; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:    addq $56, %rsp
+; MSVC-X64-O0-NEXT:    retq
 entry:
  %a_addr = alloca ptr    ; <ptr> [#uses=2]
  %buf = alloca [8 x i8]    ; <ptr> [#uses=2]
@@ -25,47 +119,254 @@ return:    ; preds = %entry
 declare void @escape(ptr)
 
 define void @test_vla(i32 %n) nounwind ssp {
+; MSVC-X86-LABEL: test_vla:
+; MSVC-X86:       # %bb.0:
+; MSVC-X86-NEXT:    pushl %ebp
+; MSVC-X86-NEXT:    movl %esp, %ebp
+; MSVC-X86-NEXT:    pushl %eax
+; MSVC-X86-NEXT:    movl 8(%ebp), %eax
+; MSVC-X86-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    movl %ecx, -4(%ebp)
+; MSVC-X86-NEXT:    shll $2, %eax
+; MSVC-X86-NEXT:    calll __chkstk
+; MSVC-X86-NEXT:    movl %esp, %eax
+; MSVC-X86-NEXT:    pushl %eax
+; MSVC-X86-NEXT:    calll _escape
+; MSVC-X86-NEXT:    addl $4, %esp
+; MSVC-X86-NEXT:    movl -4(%ebp), %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie@4
+; MSVC-X86-NEXT:    movl %ebp, %esp
+; MSVC-X86-NEXT:    popl %ebp
+; MSVC-X86-NEXT:    retl
+;
+; MSVC-X64-LABEL: test_vla:
+; MSVC-X64:       # %bb.0:
+; MSVC-X64-NEXT:    pushq %rbp
+; MSVC-X64-NEXT:    subq $16, %rsp
+; MSVC-X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-NEXT:    xorq %rbp, %rax
+; MSVC-X64-NEXT:    movq %rax, -8(%rbp)
+; MSVC-X64-NEXT:    movl %ecx, %eax
+; MSVC-X64-NEXT:    leaq 15(,%rax,4), %rax
+; MSVC-X64-NEXT:    andq $-16, %rax
+; MSVC-X64-NEXT:    callq __chkstk
+; MSVC-X64-NEXT:    subq %rax, %rsp
+; MSVC-X64-NEXT:    movq %rsp, %rcx
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    callq escape
+; MSVC-X64-NEXT:    addq $32, %rsp
+; MSVC-X64-NEXT:    movq -8(%rbp), %rcx
+; MSVC-X64-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-NEXT:    cmpq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    jne .LBB1_2
+; MSVC-X64-NEXT:  # %bb.1:
+; MSVC-X64-NEXT:    movq %rbp, %rsp
+; MSVC-X64-NEXT:    popq %rbp
+; MSVC-X64-NEXT:    retq
+; MSVC-X64-NEXT:  .LBB1_2:
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    callq __security_check_cookie
+; MSVC-X64-NEXT:    addq $32, %rsp
+; MSVC-X64-NEXT:    int3
+;
+; MSVC-X86-O0-LABEL: test_vla:
+; MSVC-X86-O0:       # %bb.0:
+; MSVC-X86-O0-NEXT:    pushl %ebp
+; MSVC-X86-O0-NEXT:    movl %esp, %ebp
+; MSVC-X86-O0-NEXT:    pushl %eax
+; MSVC-X86-O0-NEXT:    movl 8(%ebp), %eax
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    movl %ecx, -4(%ebp)
+; MSVC-X86-O0-NEXT:    shll $2, %eax
+; MSVC-X86-O0-NEXT:    calll __chkstk
+; MSVC-X86-O0-NEXT:    movl %esp, %eax
+; MSVC-X86-O0-NEXT:    subl $4, %esp
+; MSVC-X86-O0-NEXT:    movl %eax, (%esp)
+; MSVC-X86-O0-NEXT:    calll _escape
+; MSVC-X86-O0-NEXT:    addl $4, %esp
+; MSVC-X86-O0-NEXT:    movl -4(%ebp), %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    calll @__security_check_cookie at 4
+; MSVC-X86-O0-NEXT:    movl %ebp, %esp
+; MSVC-X86-O0-NEXT:    popl %ebp
+; MSVC-X86-O0-NEXT:    retl
+;
+; MSVC-X64-O0-LABEL: test_vla:
+; MSVC-X64-O0:       # %bb.0:
+; MSVC-X64-O0-NEXT:    pushq %rbp
+; MSVC-X64-O0-NEXT:    subq $16, %rsp
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rax
+; MSVC-X64-O0-NEXT:    movq %rax, -8(%rbp)
+; MSVC-X64-O0-NEXT:    movl %ecx, %eax
+; MSVC-X64-O0-NEXT:    # kill: def $rax killed $eax
+; MSVC-X64-O0-NEXT:    leaq 15(,%rax,4), %rax
+; MSVC-X64-O0-NEXT:    andq $-16, %rax
+; MSVC-X64-O0-NEXT:    callq __chkstk
+; MSVC-X64-O0-NEXT:    subq %rax, %rsp
+; MSVC-X64-O0-NEXT:    movq %rsp, %rcx
+; MSVC-X64-O0-NEXT:    subq $32, %rsp
+; MSVC-X64-O0-NEXT:    callq escape
+; MSVC-X64-O0-NEXT:    addq $32, %rsp
+; MSVC-X64-O0-NEXT:    movq -8(%rbp), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-O0-NEXT:    subq $32, %rsp
+; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:    movq %rbp, %rsp
+; MSVC-X64-O0-NEXT:    popq %rbp
+; MSVC-X64-O0-NEXT:    retq
   %vla = alloca i32, i32 %n
   call void @escape(ptr %vla)
   ret void
 }
 
-; MSVC-X86-LABEL: _test_vla:
-; MSVC-X86: pushl %ebp
-; MSVC-X86: movl %esp, %ebp
-; MSVC-X86: movl ___security_cookie, %[[REG1:[^ ]*]]
-; MSVC-X86: xorl %ebp, %[[REG1]]
-; MSVC-X86: movl %[[REG1]], [[SLOT:-[0-9]*]](%ebp)
-; MSVC-X86: calll __chkstk
-; MSVC-X86: pushl
-; MSVC-X86: calll _escape
-; MSVC-X86: movl [[SLOT]](%ebp), %ecx
-; MSVC-X86: xorl %ebp, %ecx
-; MSVC-X86: calll @__security_check_cookie at 4
-; MSVC-X86: movl %ebp, %esp
-; MSVC-X86: popl %ebp
-; MSVC-X86: retl
-
-; MSVC-X64-LABEL: test_vla:
-; MSVC-X64: pushq %rbp
-; MSVC-X64: subq $16, %rsp
-; MSVC-X64: leaq 16(%rsp), %rbp
-; MSVC-X64: movq __security_cookie(%rip), %[[REG1:[^ ]*]]
-; MSVC-X64: xorq %rbp, %[[REG1]]
-; MSVC-X64: movq %[[REG1]], [[SLOT:-[0-9]*]](%rbp)
-; MSVC-X64: callq __chkstk
-; MSVC-X64: callq escape
-; MSVC-X64: movq [[SLOT]](%rbp), %rcx
-; MSVC-X64: xorq %rbp, %rcx
-; MSVC-X64: callq __security_check_cookie
-; MSVC-X64: retq
-
-
 ; This case is interesting because we address local variables with RBX but XOR
 ; the guard value with RBP. That's fine, either value will do, as long as they
 ; are the same across the life of the frame.
 
 define void @test_vla_realign(i32 %n) nounwind ssp {
+; MSVC-X86-LABEL: test_vla_realign:
+; MSVC-X86:       # %bb.0:
+; MSVC-X86-NEXT:    pushl %ebp
+; MSVC-X86-NEXT:    movl %esp, %ebp
+; MSVC-X86-NEXT:    pushl %edi
+; MSVC-X86-NEXT:    pushl %esi
+; MSVC-X86-NEXT:    andl $-32, %esp
+; MSVC-X86-NEXT:    subl $32, %esp
+; MSVC-X86-NEXT:    movl %esp, %esi
+; MSVC-X86-NEXT:    movl 8(%ebp), %eax
+; MSVC-X86-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    movl %ecx, 12(%esi)
+; MSVC-X86-NEXT:    shll $2, %eax
+; MSVC-X86-NEXT:    calll __chkstk
+; MSVC-X86-NEXT:    movl %esp, %edi
+; MSVC-X86-NEXT:    movl %esi, %eax
+; MSVC-X86-NEXT:    pushl %eax
+; MSVC-X86-NEXT:    calll _escape
+; MSVC-X86-NEXT:    addl $4, %esp
+; MSVC-X86-NEXT:    pushl %edi
+; MSVC-X86-NEXT:    calll _escape
+; MSVC-X86-NEXT:    addl $4, %esp
+; MSVC-X86-NEXT:    movl 12(%esi), %ecx
+; MSVC-X86-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-NEXT:    calll @__security_check_cookie at 4
+; MSVC-X86-NEXT:    leal -8(%ebp), %esp
+; MSVC-X86-NEXT:    popl %esi
+; MSVC-X86-NEXT:    popl %edi
+; MSVC-X86-NEXT:    popl %ebp
+; MSVC-X86-NEXT:    retl
+;
+; MSVC-X64-LABEL: test_vla_realign:
+; MSVC-X64:       # %bb.0:
+; MSVC-X64-NEXT:    pushq %rbp
+; MSVC-X64-NEXT:    pushq %rsi
+; MSVC-X64-NEXT:    pushq %rbx
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; MSVC-X64-NEXT:    andq $-32, %rsp
+; MSVC-X64-NEXT:    movq %rsp, %rbx
+; MSVC-X64-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-NEXT:    xorq %rbp, %rax
+; MSVC-X64-NEXT:    movq %rax, 24(%rbx)
+; MSVC-X64-NEXT:    movl %ecx, %eax
+; MSVC-X64-NEXT:    leaq 15(,%rax,4), %rax
+; MSVC-X64-NEXT:    andq $-16, %rax
+; MSVC-X64-NEXT:    callq __chkstk
+; MSVC-X64-NEXT:    subq %rax, %rsp
+; MSVC-X64-NEXT:    movq %rsp, %rsi
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    movq %rbx, %rcx
+; MSVC-X64-NEXT:    callq escape
+; MSVC-X64-NEXT:    movq %rsi, %rcx
+; MSVC-X64-NEXT:    callq escape
+; MSVC-X64-NEXT:    addq $32, %rsp
+; MSVC-X64-NEXT:    movq 24(%rbx), %rcx
+; MSVC-X64-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-NEXT:    cmpq __security_cookie(%rip), %rcx
+; MSVC-X64-NEXT:    jne .LBB2_2
+; MSVC-X64-NEXT:  # %bb.1:
+; MSVC-X64-NEXT:    movq %rbp, %rsp
+; MSVC-X64-NEXT:    popq %rbx
+; MSVC-X64-NEXT:    popq %rsi
+; MSVC-X64-NEXT:    popq %rbp
+; MSVC-X64-NEXT:    retq
+; MSVC-X64-NEXT:  .LBB2_2:
+; MSVC-X64-NEXT:    subq $32, %rsp
+; MSVC-X64-NEXT:    callq __security_check_cookie
+; MSVC-X64-NEXT:    addq $32, %rsp
+; MSVC-X64-NEXT:    int3
+;
+; MSVC-X86-O0-LABEL: test_vla_realign:
+; MSVC-X86-O0:       # %bb.0:
+; MSVC-X86-O0-NEXT:    pushl %ebp
+; MSVC-X86-O0-NEXT:    movl %esp, %ebp
+; MSVC-X86-O0-NEXT:    pushl %esi
+; MSVC-X86-O0-NEXT:    andl $-32, %esp
+; MSVC-X86-O0-NEXT:    subl $64, %esp
+; MSVC-X86-O0-NEXT:    movl %esp, %esi
+; MSVC-X86-O0-NEXT:    movl 8(%ebp), %eax
+; MSVC-X86-O0-NEXT:    movl ___security_cookie, %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    movl %ecx, 48(%esi)
+; MSVC-X86-O0-NEXT:    shll $2, %eax
+; MSVC-X86-O0-NEXT:    calll __chkstk
+; MSVC-X86-O0-NEXT:    movl %esp, %eax
+; MSVC-X86-O0-NEXT:    movl %eax, 28(%esi) # 4-byte Spill
+; MSVC-X86-O0-NEXT:    leal 32(%esi), %eax
+; MSVC-X86-O0-NEXT:    subl $4, %esp
+; MSVC-X86-O0-NEXT:    movl %eax, (%esp)
+; MSVC-X86-O0-NEXT:    calll _escape
+; MSVC-X86-O0-NEXT:    movl 28(%esi), %eax # 4-byte Reload
+; MSVC-X86-O0-NEXT:    movl %eax, (%esp)
+; MSVC-X86-O0-NEXT:    calll _escape
+; MSVC-X86-O0-NEXT:    addl $4, %esp
+; MSVC-X86-O0-NEXT:    movl 48(%esi), %ecx
+; MSVC-X86-O0-NEXT:    xorl %ebp, %ecx
+; MSVC-X86-O0-NEXT:    calll @__security_check_cookie at 4
+; MSVC-X86-O0-NEXT:    leal -4(%ebp), %esp
+; MSVC-X86-O0-NEXT:    popl %esi
+; MSVC-X86-O0-NEXT:    popl %ebp
+; MSVC-X86-O0-NEXT:    retl
+;
+; MSVC-X64-O0-LABEL: test_vla_realign:
+; MSVC-X64-O0:       # %bb.0:
+; MSVC-X64-O0-NEXT:    pushq %rbp
+; MSVC-X64-O0-NEXT:    pushq %rbx
+; MSVC-X64-O0-NEXT:    subq $72, %rsp
+; MSVC-X64-O0-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; MSVC-X64-O0-NEXT:    andq $-32, %rsp
+; MSVC-X64-O0-NEXT:    movq %rsp, %rbx
+; MSVC-X64-O0-NEXT:    movq __security_cookie(%rip), %rax
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rax
+; MSVC-X64-O0-NEXT:    movq %rax, 64(%rbx)
+; MSVC-X64-O0-NEXT:    movl %ecx, %eax
+; MSVC-X64-O0-NEXT:    # kill: def $rax killed $eax
+; MSVC-X64-O0-NEXT:    leaq 15(,%rax,4), %rax
+; MSVC-X64-O0-NEXT:    andq $-16, %rax
+; MSVC-X64-O0-NEXT:    callq __chkstk
+; MSVC-X64-O0-NEXT:    subq %rax, %rsp
+; MSVC-X64-O0-NEXT:    movq %rsp, %rax
+; MSVC-X64-O0-NEXT:    movq %rax, 24(%rbx) # 8-byte Spill
+; MSVC-X64-O0-NEXT:    leaq 32(%rbx), %rcx
+; MSVC-X64-O0-NEXT:    subq $32, %rsp
+; MSVC-X64-O0-NEXT:    callq escape
+; MSVC-X64-O0-NEXT:    movq 24(%rbx), %rcx # 8-byte Reload
+; MSVC-X64-O0-NEXT:    callq escape
+; MSVC-X64-O0-NEXT:    addq $32, %rsp
+; MSVC-X64-O0-NEXT:    movq 64(%rbx), %rcx
+; MSVC-X64-O0-NEXT:    xorq %rbp, %rcx
+; MSVC-X64-O0-NEXT:    subq $32, %rsp
+; MSVC-X64-O0-NEXT:    callq __security_check_cookie
+; MSVC-X64-O0-NEXT:    leaq 8(%rbp), %rsp
+; MSVC-X64-O0-NEXT:    popq %rbx
+; MSVC-X64-O0-NEXT:    popq %rbp
+; MSVC-X64-O0-NEXT:    retq
   %realign = alloca i32, align 32
   %vla = alloca i32, i32 %n
   call void @escape(ptr %realign)
@@ -73,45 +374,6 @@ define void @test_vla_realign(i32 %n) nounwind ssp {
   ret void
 }
 
-; MSVC-X86-LABEL: _test_vla_realign:
-; MSVC-X86: pushl %ebp
-; MSVC-X86: movl %esp, %ebp
-; MSVC-X86: pushl %esi
-; MSVC-X86: andl $-32, %esp
-; MSVC-X86: subl $32, %esp
-; MSVC-X86: movl %esp, %esi
-; MSVC-X86: movl ___security_cookie, %[[REG1:[^ ]*]]
-; MSVC-X86: xorl %ebp, %[[REG1]]
-; MSVC-X86: movl %[[REG1]], [[SLOT:[0-9]*]](%esi)
-; MSVC-X86: calll __chkstk
-; MSVC-X86: pushl
-; MSVC-X86: calll _escape
-; MSVC-X86: movl [[SLOT]](%esi), %ecx
-; MSVC-X86: xorl %ebp, %ecx
-; MSVC-X86: calll @__security_check_cookie at 4
-; MSVC-X86: leal -8(%ebp), %esp
-; MSVC-X86: popl %esi
-; MSVC-X86: popl %ebp
-; MSVC-X86: retl
-
-; MSVC-X64-LABEL: test_vla_realign:
-; MSVC-X64: pushq %rbp
-; MSVC-X64: pushq %rbx
-; MSVC-X64: subq $32, %rsp
-; MSVC-X64: leaq 32(%rsp), %rbp
-; MSVC-X64: andq $-32, %rsp
-; MSVC-X64: movq %rsp, %rbx
-; MSVC-X64: movq __security_cookie(%rip), %[[REG1:[^ ]*]]
-; MSVC-X64: xorq %rbp, %[[REG1]]
-; MSVC-X64: movq %[[REG1]], [[SLOT:[0-9]*]](%rbx)
-; MSVC-X64: callq __chkstk
-; MSVC-X64: callq escape
-; MSVC-X64: movq [[SLOT]](%rbx), %rcx
-; MSVC-X64: xorq %rbp, %rcx
-; MSVC-X64: callq __security_check_cookie
-; MSVC-X64: retq
-
-
 declare ptr @strcpy(ptr, ptr) nounwind
 
 declare i32 @printf(ptr, ...) nounwind

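For readers following the regenerated CHECK lines above: at -O2 the MSVC-X64
epilogues now compare the XOR'ed frame cookie against __security_cookie inline
and reach __security_check_cookie (followed by int3) only on the mismatch path,
while the -O0 and 32-bit configurations keep the unconditional call. A minimal
source-level reproduction of the `test` function, assuming a Windows MSVC
target with stack protection requested (the triple and flags below are
illustrative, not taken from the patch):

  // repro.cpp -- compile with something like:
  //   clang --target=x86_64-pc-windows-msvc -fstack-protector -O2 -S repro.cpp
  #include <cstdio>
  #include <cstring>

  void test(char *a) {
    char buf[8];          // small stack buffer guarded by a security cookie
    std::strcpy(buf, a);  // unbounded copy is why the frame gets a cookie
    std::printf("%s\n", buf);
  }
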
>From 9dae0613c5be03a5338c4c6841ea73a3751c542f Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde at intel.com>
Date: Thu, 27 Jun 2024 11:22:41 -0700
Subject: [PATCH 07/12] remove spaces added by merge/cleanup

---
 llvm/test/CodeGen/X86/tailcc-ssp.ll | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/X86/tailcc-ssp.ll b/llvm/test/CodeGen/X86/tailcc-ssp.ll
index 1328e0cfa7124..81b6c9882fd99 100644
--- a/llvm/test/CodeGen/X86/tailcc-ssp.ll
+++ b/llvm/test/CodeGen/X86/tailcc-ssp.ll
@@ -48,8 +48,6 @@ define tailcc void @tailcall_frame(ptr %0, i64 %1) sspreq {
 ; LINUX-NEXT:    .cfi_def_cfa_offset 32
 ; LINUX-NEXT:    callq __stack_chk_fail at PLT
 
-
-
    tail call tailcc void @h(ptr null, i64 0, ptr null)
    ret void
 }
@@ -76,7 +74,7 @@ define void @tailcall_unrelated_frame() sspreq {
 ; WINDOWS-NEXT:    callq __security_check_cookie
 ; WINDOWS-NEXT:    int3
 ; WINDOWS-NEXT:    .seh_endproc
-;
+
 ; LINUX-LABEL: tailcall_unrelated_frame:
 ; LINUX:       # %bb.0:
 ; LINUX-NEXT:    pushq %rax
@@ -95,7 +93,6 @@ define void @tailcall_unrelated_frame() sspreq {
 ; LINUX-NEXT:    .cfi_def_cfa_offset 16
 ; LINUX-NEXT:    callq __stack_chk_fail at PLT
 
-
   call void @bar()
   tail call void @bar()
   ret void

>From 64dccfae3121d2b102a68ae4817c23af72884e0e Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde at intel.com>
Date: Sun, 30 Jun 2024 22:16:09 -0700
Subject: [PATCH 08/12] update coding conv

---
 llvm/lib/Target/X86/X86FixupStackProtector.cpp | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/X86/X86FixupStackProtector.cpp b/llvm/lib/Target/X86/X86FixupStackProtector.cpp
index d8c31e544d348..b1d0d19317a10 100644
--- a/llvm/lib/Target/X86/X86FixupStackProtector.cpp
+++ b/llvm/lib/Target/X86/X86FixupStackProtector.cpp
@@ -78,7 +78,7 @@ X86FixupStackProtectorPass::getSecurityCheckerBasicBlock(MachineFunction &MF) {
   MachineBasicBlock::reverse_iterator RBegin, REnd;
 
   for (auto &MBB : llvm::reverse(MF)) {
-    for (RBegin = MBB.rbegin(), REnd = MBB.rend(); RBegin != REnd; RBegin++) {
+    for (RBegin = MBB.rbegin(), REnd = MBB.rend(); RBegin != REnd; ++RBegin) {
       auto &MI = *RBegin;
       if (MI.getOpcode() == X86::CALL64pcrel32 &&
           MI.getNumExplicitOperands() == 1) {
@@ -104,22 +104,22 @@ void X86FixupStackProtectorPass::getGuardCheckSequence(
   MachineBasicBlock::reverse_iterator DIt(CheckCall);
   // Seq From StackUp to Stack Down Is fixed.
   // ADJCALLSTACKUP64
-  UIt++;
+  ++UIt;
   SeqMI[4] = &*UIt;
 
   // CALL __security_check_cookie
   SeqMI[3] = CheckCall;
 
   // COPY function slot cookie
-  DIt++;
+  ++DIt;
   SeqMI[2] = &*DIt;
 
   // ADJCALLSTACKDOWN64
-  DIt++;
+  ++DIt;
   SeqMI[1] = &*DIt;
 
   MachineBasicBlock::reverse_iterator XIt(SeqMI[1]);
-  for (; XIt != CurMBB->rbegin(); XIt++) {
+  for (; XIt != CurMBB->rbegin(); ++XIt) {
     auto &CI = *XIt;
     if ((CI.getOpcode() == X86::XOR64_FP) || (CI.getOpcode() == X86::XOR32_FP))
       break;
@@ -142,7 +142,7 @@ X86FixupStackProtectorPass::CreateFailCheckSequence(MachineBasicBlock *CurMBB,
 
   MachineInstr *GuardXor = SeqMI[0];
   MachineBasicBlock::iterator InsertPt(GuardXor);
-  InsertPt++;
+  ++InsertPt;
 
   // Compare security_Cookie with XOR_Val, if not same, we have violation
   auto CMI = BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rm))
@@ -216,13 +216,13 @@ bool X86FixupStackProtectorPass::runOnMachineFunction(MachineFunction &MF) {
   // After Inserting JMP_1, we can not have two terminators
   // in same block, split CurrentMBB after JMP_1
   MachineBasicBlock::iterator SplitIt(SeqMI[4]);
-  SplitIt++;
+  ++SplitIt;
   SplitBasicBlock(CurMBB, NewRetMBB, SplitIt);
 
   // Fill up Failure Routine, move Fail Check Squence from CurMBB to FailMBB
   MachineBasicBlock::iterator U1It(SeqMI[1]);
   MachineBasicBlock::iterator U2It(SeqMI[4]);
-  U2It++;
+  ++U2It;
   FailMBB->splice(FailMBB->end(), CurMBB, U1It, U2It);
   BuildMI(*FailMBB, FailMBB->end(), DebugLoc(), TII->get(X86::INT3));
 
@@ -230,7 +230,7 @@ bool X86FixupStackProtectorPass::runOnMachineFunction(MachineFunction &MF) {
   // from Current Basic BLocks into New Return Block
   JMI.addMBB(NewRetMBB);
   MachineBasicBlock::iterator SplicePt(JMI.getInstr());
-  SplicePt++;
+  ++SplicePt;
   if (SplicePt != CurMBB->end())
     NewRetMBB->splice(NewRetMBB->end(), CurMBB, SplicePt);
 

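The iterator changes above are purely stylistic, but they follow the LLVM
coding standard: for non-trivial iterators such as
MachineBasicBlock::(reverse_)iterator, post-increment must construct and
return a copy of the old iterator that the loop immediately discards, so
pre-increment is preferred. A generic illustration (plain C++, not the pass's
own code):

  #include <list>

  // Summing a std::list: ++It advances the iterator in place and returns a
  // reference; It++ would return a discarded temporary copy on every step.
  int sum(const std::list<int> &L) {
    int S = 0;
    for (auto It = L.begin(), E = L.end(); It != E; ++It)
      S += *It;
    return S;
  }
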
>From 5059849ff8cb471a306e57c2aa1e5fc8d9c1c615 Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde at intel.com>
Date: Mon, 1 Jul 2024 12:37:09 -0700
Subject: [PATCH 09/12] update naming to X86 Windows related terms

---
 llvm/lib/Target/X86/CMakeLists.txt            |  2 +-
 llvm/lib/Target/X86/X86.h                     |  6 +-
 llvm/lib/Target/X86/X86TargetMachine.cpp      |  2 +-
 ...cpp => X86WinFixupBufferSecurityCheck.cpp} | 55 ++++++++++---------
 4 files changed, 35 insertions(+), 30 deletions(-)
 rename llvm/lib/Target/X86/{X86FixupStackProtector.cpp => X86WinFixupBufferSecurityCheck.cpp} (80%)

diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 5303758ff8a2e..02770b134d26c 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -48,7 +48,7 @@ set(sources
   X86AvoidStoreForwardingBlocks.cpp
   X86DynAllocaExpander.cpp
   X86FixupSetCC.cpp
-  X86FixupStackProtector.cpp
+  X86WinFixupBufferSecurityCheck.cpp
   X86FlagsCopyLowering.cpp
   X86FloatingPoint.cpp
   X86FrameLowering.cpp
diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index b4432f45987cd..d6e0d5e3a3b2c 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -73,8 +73,8 @@ FunctionPass *createX86OptimizeLEAs();
 /// Return a pass that transforms setcc + movzx pairs into xor + setcc.
 FunctionPass *createX86FixupSetCC();
 
-/// Return a pass that transform inline stack protector into seperate bb
-FunctionPass *createX86FixupStackProtectorPass();
+/// Return a pass that splits the buffer security check into a separate bb
+FunctionPass *createX86WinFixupBufferSecurityCheckPass();
 
 /// Return a pass that avoids creating store forward block issues in the hardware.
 FunctionPass *createX86AvoidStoreForwardingBlocks();
@@ -189,7 +189,7 @@ void initializeX86ExpandPseudoPass(PassRegistry &);
 void initializeX86FastPreTileConfigPass(PassRegistry &);
 void initializeX86FastTileConfigPass(PassRegistry &);
 void initializeX86FixupSetCCPassPass(PassRegistry &);
-void initializeX86FixupStackProtectorPassPass(PassRegistry &);
+void initializeX86WinFixupBufferSecurityCheckPassPass(PassRegistry &);
 void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
 void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
 void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index b245e80ad18dc..4c77f40fd32a3 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -550,7 +550,7 @@ bool X86PassConfig::addPreISel() {
 void X86PassConfig::addPreRegAlloc() {
   if (getOptLevel() != CodeGenOptLevel::None) {
     addPass(&LiveRangeShrinkID);
-    addPass(createX86FixupStackProtectorPass());
+    addPass(createX86WinFixupBufferSecurityCheckPass());
     addPass(createX86FixupSetCC());
     addPass(createX86OptimizeLEAs());
     addPass(createX86CallFrameOptimization());
diff --git a/llvm/lib/Target/X86/X86FixupStackProtector.cpp b/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
similarity index 80%
rename from llvm/lib/Target/X86/X86FixupStackProtector.cpp
rename to llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
index b1d0d19317a10..a5cea35f788e7 100644
--- a/llvm/lib/Target/X86/X86FixupStackProtector.cpp
+++ b/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
@@ -1,14 +1,15 @@
-//===---- X86FixupStackProtector.cpp Fix Stack Protector Call ----------===//
+//===---- X86FixupBufferSecurityCheck.cpp Fix Buffer Security Check Call
+//----------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-// Stack Protector implementation inserts platform specific callback into code.
-// For windows __security_check_cookie call gets call everytime function is
-// return without fixup. Since this function is defined in runtime library, it
-// incures cost of call in dll which simply does comparison and returns most
-// time. With Fixup, We selective move to call in DLL only if comparison fails.
+// The buffer security check implementation inserts a platform-specific
+// callback into the code. On Windows, __security_check_cookie is called every
+// time a function returns. Since this function is defined in the runtime
+// library, each return pays for a DLL call that usually just compares values.
+// With this fixup, we only branch to the DLL call when the comparison fails.
 //===----------------------------------------------------------------------===//
 
@@ -25,17 +26,19 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "x86-fixup-spcall"
+#define DEBUG_TYPE "x86-fixup-bscheck"
 
 namespace {
 
-class X86FixupStackProtectorPass : public MachineFunctionPass {
+class X86WinFixupBufferSecurityCheckPass : public MachineFunctionPass {
 public:
   static char ID;
 
-  X86FixupStackProtectorPass() : MachineFunctionPass(ID) {}
+  X86WinFixupBufferSecurityCheckPass() : MachineFunctionPass(ID) {}
 
-  StringRef getPassName() const override { return "X86 Fixup Stack Protector"; }
+  StringRef getPassName() const override {
+    return "X86 Fixup Buffer Security Check";
+  }
 
   bool runOnMachineFunction(MachineFunction &MF) override;
 
@@ -44,7 +47,7 @@ class X86FixupStackProtectorPass : public MachineFunctionPass {
 
   void getGuardCheckSequence(MachineBasicBlock *CurMBB, MachineInstr *CheckCall,
                              MachineInstr *SeqMI[5]);
- 
+
   void SplitBasicBlock(MachineBasicBlock *CurMBB, MachineBasicBlock *NewRetMBB,
                        MachineBasicBlock::iterator SplitIt);
 
@@ -58,23 +61,24 @@ class X86FixupStackProtectorPass : public MachineFunctionPass {
 };
 } // end anonymous namespace
 
-char X86FixupStackProtectorPass::ID = 0;
+char X86WinFixupBufferSecurityCheckPass::ID = 0;
 
-INITIALIZE_PASS(X86FixupStackProtectorPass, DEBUG_TYPE, DEBUG_TYPE, false,
-                false)
+INITIALIZE_PASS(X86WinFixupBufferSecurityCheckPass, DEBUG_TYPE, DEBUG_TYPE,
+                false, false)
 
-FunctionPass *llvm::createX86FixupStackProtectorPass() {
-  return new X86FixupStackProtectorPass();
+FunctionPass *llvm::createX86WinFixupBufferSecurityCheckPass() {
+  return new X86WinFixupBufferSecurityCheckPass();
 }
 
-void X86FixupStackProtectorPass::SplitBasicBlock(
+void X86WinFixupBufferSecurityCheckPass::SplitBasicBlock(
     MachineBasicBlock *CurMBB, MachineBasicBlock *NewRetMBB,
     MachineBasicBlock::iterator SplitIt) {
   NewRetMBB->splice(NewRetMBB->end(), CurMBB, SplitIt, CurMBB->end());
 }
 
 std::pair<MachineBasicBlock *, MachineInstr *>
-X86FixupStackProtectorPass::getSecurityCheckerBasicBlock(MachineFunction &MF) {
+X86WinFixupBufferSecurityCheckPass::getSecurityCheckerBasicBlock(
+    MachineFunction &MF) {
   MachineBasicBlock::reverse_iterator RBegin, REnd;
 
   for (auto &MBB : llvm::reverse(MF)) {
@@ -96,7 +100,7 @@ X86FixupStackProtectorPass::getSecurityCheckerBasicBlock(MachineFunction &MF) {
   return std::make_pair(nullptr, nullptr);
 }
 
-void X86FixupStackProtectorPass::getGuardCheckSequence(
+void X86WinFixupBufferSecurityCheckPass::getGuardCheckSequence(
     MachineBasicBlock *CurMBB, MachineInstr *CheckCall,
     MachineInstr *SeqMI[5]) {
 
@@ -128,9 +132,9 @@ void X86FixupStackProtectorPass::getGuardCheckSequence(
 }
 
 std::pair<MachineInstr *, MachineInstr *>
-X86FixupStackProtectorPass::CreateFailCheckSequence(MachineBasicBlock *CurMBB,
-                                                    MachineBasicBlock *FailMBB,
-                                                    MachineInstr *SeqMI[5]) {
+X86WinFixupBufferSecurityCheckPass::CreateFailCheckSequence(
+    MachineBasicBlock *CurMBB, MachineBasicBlock *FailMBB,
+    MachineInstr *SeqMI[5]) {
 
   auto MF = CurMBB->getParent();
 
@@ -162,13 +166,13 @@ X86FixupStackProtectorPass::CreateFailCheckSequence(MachineBasicBlock *CurMBB,
   return std::make_pair(CMI.getInstr(), JMI.getInstr());
 }
 
-void X86FixupStackProtectorPass::FinishBlock(MachineBasicBlock *MBB) {
+void X86WinFixupBufferSecurityCheckPass::FinishBlock(MachineBasicBlock *MBB) {
   LivePhysRegs LiveRegs;
   computeAndAddLiveIns(LiveRegs, *MBB);
 }
 
-void X86FixupStackProtectorPass::FinishFunction(MachineBasicBlock *FailMBB,
-                                                MachineBasicBlock *NewRetMBB) {
+void X86WinFixupBufferSecurityCheckPass::FinishFunction(
+    MachineBasicBlock *FailMBB, MachineBasicBlock *NewRetMBB) {
   FailMBB->getParent()->RenumberBlocks();
   // FailMBB includes call to MSCV RT  where is __security_check_cookie
   // function is called. This function uses regcall and it expects cookie
@@ -179,7 +183,8 @@ void X86FixupStackProtectorPass::FinishFunction(MachineBasicBlock *FailMBB,
   FinishBlock(NewRetMBB);
 }
 
-bool X86FixupStackProtectorPass::runOnMachineFunction(MachineFunction &MF) {
+bool X86WinFixupBufferSecurityCheckPass::runOnMachineFunction(
+    MachineFunction &MF) {
   bool Changed = false;
   const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
 

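For context on the sequence this pass rewrites: CreateFailCheckSequence keeps
the cookie comparison inline on the hot path and routes only the mismatch case
to the __security_check_cookie call in FailMBB. A rough sketch of the two key
instructions using the MachineInstr builder API (GuardReg, InsertPt, and
FailMBB are illustrative names, not necessarily the pass's own):

  // Sketch only: compare the reloaded, XOR'ed cookie against the global
  // security cookie through a RIP-relative memory operand ...
  BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rm))
      .addReg(GuardReg)                        // cookie reloaded from the frame
      .addReg(X86::RIP)                        // base register: RIP-relative
      .addImm(1)                               // scale
      .addReg(X86::NoRegister)                 // no index register
      .addExternalSymbol("__security_cookie")  // displacement: the cookie symbol
      .addReg(X86::NoRegister);                // no segment register
  // ... and transfer control to the failure block only on a mismatch.
  BuildMI(*CurMBB, InsertPt, DebugLoc(), TII->get(X86::JCC_1))
      .addMBB(FailMBB)
      .addImm(X86::COND_NE);
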
>From bcdfdde19b1b205e0031d4e037116e9913c9f69f Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde at intel.com>
Date: Tue, 2 Jul 2024 04:55:34 -0700
Subject: [PATCH 10/12] update comments and sorted file list

---
 llvm/lib/Target/X86/CMakeLists.txt                     | 2 +-
 llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 02770b134d26c..9553a8619feb5 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -48,7 +48,6 @@ set(sources
   X86AvoidStoreForwardingBlocks.cpp
   X86DynAllocaExpander.cpp
   X86FixupSetCC.cpp
-  X86WinFixupBufferSecurityCheck.cpp
   X86FlagsCopyLowering.cpp
   X86FloatingPoint.cpp
   X86FrameLowering.cpp
@@ -84,6 +83,7 @@ set(sources
   X86TargetTransformInfo.cpp
   X86VZeroUpper.cpp
   X86WinEHState.cpp
+  X86WinFixupBufferSecurityCheck.cpp
   X86InsertWait.cpp
   GISel/X86CallLowering.cpp
   GISel/X86InstructionSelector.cpp
diff --git a/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp b/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
index a5cea35f788e7..69271a89695fd 100644
--- a/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
+++ b/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
@@ -1,5 +1,4 @@
-//===---- X86FixupBufferSecurityCheck.cpp Fix Buffer Security Check Call
-//----------===//
+//===---- X86FixupBufferSecurityCheck.cpp Fix Buffer Security Check Call---===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.

>From 0345f5733dc04ad9abe185f32c1f3aa5b6c32957 Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde at intel.com>
Date: Tue, 2 Jul 2024 11:00:26 -0700
Subject: [PATCH 11/12] fix infra testcase fail

---
 llvm/test/CodeGen/X86/opt-pipeline.ll | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/X86/opt-pipeline.ll b/llvm/test/CodeGen/X86/opt-pipeline.ll
index 631f955ee6cc0..b687085964abb 100644
--- a/llvm/test/CodeGen/X86/opt-pipeline.ll
+++ b/llvm/test/CodeGen/X86/opt-pipeline.ll
@@ -119,7 +119,7 @@
 ; CHECK-NEXT:       Peephole Optimizations
 ; CHECK-NEXT:       Remove dead machine instructions
 ; CHECK-NEXT:       Live Range Shrink
-; CHECK-NEXT:       X86 Fixup Stack Protector
+; CHECK-NEXT:       X86 Fixup Buffer Security Check
 ; CHECK-NEXT:       X86 Fixup SetCC
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       X86 LEA Optimize

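opt-pipeline.ll records the pass structure printed by llc's
-debug-pass=Structure option, so any change to a pass's getPassName() string
has to be mirrored there; the rename in the next patch touches this same line
once more.
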
>From d717f6448b40753b4feefdcbee388b505a6affb4 Mon Sep 17 00:00:00 2001
From: mahesh-attarde <mahesh.attarde at intel.com>
Date: Wed, 3 Jul 2024 08:09:14 -0700
Subject: [PATCH 12/12] update missing win string

---
 llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp | 10 +++++-----
 llvm/test/CodeGen/X86/opt-pipeline.ll                  |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp b/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
index 69271a89695fd..7101b0bd70312 100644
--- a/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
+++ b/llvm/lib/Target/X86/X86WinFixupBufferSecurityCheck.cpp
@@ -1,12 +1,12 @@
-//===---- X86FixupBufferSecurityCheck.cpp Fix Buffer Security Check Call---===//
+//===- X86WinFixupBufferSecurityCheck.cpp Fix Buffer Security Check Call -===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-// The buffer security check implementation inserts a platform-specific
+// The buffer security check implementation inserts a Windows-specific
 // callback into the code. On Windows, __security_check_cookie is called every
 // time a function returns. Since this function is defined in the runtime
 // library, each return pays for a DLL call that usually just compares values.
 // With this fixup, we only branch to the DLL call when the comparison fails.
@@ -25,7 +25,7 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "x86-fixup-bscheck"
+#define DEBUG_TYPE "x86-win-fixup-bscheck"
 
 namespace {
 
@@ -36,7 +36,7 @@ class X86WinFixupBufferSecurityCheckPass : public MachineFunctionPass {
   X86WinFixupBufferSecurityCheckPass() : MachineFunctionPass(ID) {}
 
   StringRef getPassName() const override {
-    return "X86 Fixup Buffer Security Check";
+    return "X86 Windows Fixup Buffer Security Check";
   }
 
   bool runOnMachineFunction(MachineFunction &MF) override;
diff --git a/llvm/test/CodeGen/X86/opt-pipeline.ll b/llvm/test/CodeGen/X86/opt-pipeline.ll
index b687085964abb..4009a75b0a9c5 100644
--- a/llvm/test/CodeGen/X86/opt-pipeline.ll
+++ b/llvm/test/CodeGen/X86/opt-pipeline.ll
@@ -119,7 +119,7 @@
 ; CHECK-NEXT:       Peephole Optimizations
 ; CHECK-NEXT:       Remove dead machine instructions
 ; CHECK-NEXT:       Live Range Shrink
-; CHECK-NEXT:       X86 Fixup Buffer Security Check
+; CHECK-NEXT:       X86 Windows Fixup Buffer Security Check
 ; CHECK-NEXT:       X86 Fixup SetCC
 ; CHECK-NEXT:       Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:       X86 LEA Optimize


