[llvm] [X86] Fix SLH crash on llvm.eh.sjlj.longjmp (PR #77959)

Nicholas Mosier via llvm-commits llvm-commits at lists.llvm.org
Sat Jan 13 12:28:28 PST 2024


https://github.com/nmosier updated https://github.com/llvm/llvm-project/pull/77959

From 61c15f97c43f7a946d61cd2c93b09fc7b2d0f5e7 Mon Sep 17 00:00:00 2001
From: Nicholas Mosier <nmosier at stanford.edu>
Date: Fri, 12 Jan 2024 18:12:49 +0000
Subject: [PATCH 1/2] [X86] Fix SLH crash on llvm.eh.sjlj.longjmp

Fix #60081.
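
Post-load hardening assumed that the single def of a data-invariant load is
always a virtual register. The EH_SjLj_LongJmp expansion restores %rbp and
%rsp directly from the jump buffer (see the test below), so its loads define
physical registers, and canHardenRegister() then looks up the register class
via MRI->getRegClass(), a query that is only meaningful for virtual
registers. A minimal sketch of the added condition (DefOp and CanPostHarden
are illustrative names, not part of the patch):

    // Only consider post-load hardening when the load's single def is a
    // virtual register; canHardenRegister() queries the register class,
    // which assumes a virtual register.
    const MachineOperand &DefOp = MI.getOperand(0);
    bool CanPostHarden = DefOp.isReg() && DefOp.getReg().isVirtual() &&
                         canHardenRegister(DefOp.getReg());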
---
 .../X86/X86SpeculativeLoadHardening.cpp       |  2 +-
 .../CodeGen/X86/speculative-load-hardening.ll | 31 +++++++++++++++++++
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 6301285fe95457..58b2d4df9ee4e1 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1365,7 +1365,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
         // could prune out subsequent loads.
         if (EnablePostLoadHardening && X86InstrInfo::isDataInvariantLoad(MI) &&
             !isEFLAGSDefLive(MI) && MI.getDesc().getNumDefs() == 1 &&
-            MI.getOperand(0).isReg() &&
+            MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual() &&
             canHardenRegister(MI.getOperand(0).getReg()) &&
             !HardenedAddrRegs.count(BaseReg) &&
             !HardenedAddrRegs.count(IndexReg)) {
diff --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
index 0c47fcddc43af2..45da777ea4e2a2 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -1161,3 +1161,34 @@ define void @idempotent_atomic(ptr %x) speculative_load_hardening {
   %tmp = atomicrmw or ptr %x, i32 0 seq_cst
   ret void
 }
+
+; Make sure we don't crash on longjmps (PR60081).
+declare void @llvm.eh.sjlj.longjmp(ptr)
+define void @test_longjmp(ptr %env) speculative_load_hardening {
+; X64-LABEL: test_longjmp:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rbp
+; X64-NEXT:    .cfi_def_cfa_offset 16
+; X64-NEXT:    .cfi_offset %rbp, -16
+; X64-NEXT:    movq %rsp, %rax
+; X64-NEXT:    movq $-1, %rcx
+; X64-NEXT:    sarq $63, %rax
+; X64-NEXT:    orq %rax, %rdi
+; X64-NEXT:    movq (%rdi), %rbp
+; X64-NEXT:    movq 8(%rdi), %rcx
+; X64-NEXT:    movq 16(%rdi), %rsp
+; X64-NEXT:    orq %rax, %rcx
+; X64-NEXT:    jmpq *%rcx
+;
+; X64-LFENCE-LABEL: test_longjmp:
+; X64-LFENCE:       # %bb.0:
+; X64-LFENCE-NEXT:    pushq %rbp
+; X64-LFENCE-NEXT:    .cfi_def_cfa_offset 16
+; X64-LFENCE-NEXT:    .cfi_offset %rbp, -16
+; X64-LFENCE-NEXT:    movq (%rdi), %rbp
+; X64-LFENCE-NEXT:    movq 8(%rdi), %rax
+; X64-LFENCE-NEXT:    movq 16(%rdi), %rsp
+; X64-LFENCE-NEXT:    jmpq *%rax
+  call void @llvm.eh.sjlj.longjmp(ptr %env)
+  unreachable
+}

From ef2bbf1b866ed902e3e0289941cf4a2b3243aa25 Mon Sep 17 00:00:00 2001
From: Nicholas Mosier <nmosier at stanford.edu>
Date: Sat, 13 Jan 2024 20:27:43 +0000
Subject: [PATCH 2/2] Move Reg.isVirtual() check into canHardenRegister() and
 remove redundant checks/assertions

---
 llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 58b2d4df9ee4e1..cff071c4f24b37 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1365,7 +1365,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
         // could prune out subsequent loads.
         if (EnablePostLoadHardening && X86InstrInfo::isDataInvariantLoad(MI) &&
             !isEFLAGSDefLive(MI) && MI.getDesc().getNumDefs() == 1 &&
-            MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual() &&
+            MI.getOperand(0).isReg() &&
             canHardenRegister(MI.getOperand(0).getReg()) &&
             !HardenedAddrRegs.count(BaseReg) &&
             !HardenedAddrRegs.count(IndexReg)) {
@@ -1840,7 +1840,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
       // just bail. Also check that its register class is one of the ones we
       // can harden.
       Register UseDefReg = UseMI.getOperand(0).getReg();
-      if (!UseDefReg.isVirtual() || !canHardenRegister(UseDefReg))
+      if (!canHardenRegister(UseDefReg))
         return {};
 
       SingleUseMI = &UseMI;
@@ -1863,6 +1863,10 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
 }
 
 bool X86SpeculativeLoadHardeningPass::canHardenRegister(Register Reg) {
+  // We only support hardening virtual registers.
+  if (!Reg.isVirtual())
+    return false;
+
   auto *RC = MRI->getRegClass(Reg);
   int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
   if (RegBytes > 8)
@@ -1909,7 +1913,6 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
     Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
     const DebugLoc &Loc) {
   assert(canHardenRegister(Reg) && "Cannot harden this register!");
-  assert(Reg.isVirtual() && "Cannot harden a physical register!");
 
   auto *RC = MRI->getRegClass(Reg);
   int Bytes = TRI->getRegSizeInBits(*RC) / 8;
