[llvm] [X86] Fix SLH crash on llvm.eh.sjlj.longjmp (PR #77959)
Nicholas Mosier via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 12 10:21:35 PST 2024
https://github.com/nmosier updated https://github.com/llvm/llvm-project/pull/77959
>From db4e5815d7f803e6310ae229b2248fce7238cda0 Mon Sep 17 00:00:00 2001
From: Nicholas Mosier <nmosier at stanford.edu>
Date: Fri, 12 Jan 2024 18:12:49 +0000
Subject: [PATCH] [X86] Fix SLH crash on llvm.eh.sjlj.longjmp
Fix #60081.
---
.../X86/X86SpeculativeLoadHardening.cpp | 2 +-
.../CodeGen/X86/speculative-load-hardening.ll | 31 +++++++++++++++++++
2 files changed, 32 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 6301285fe95457..58b2d4df9ee4e1 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1365,7 +1365,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
// could prune out subsequent loads.
if (EnablePostLoadHardening && X86InstrInfo::isDataInvariantLoad(MI) &&
!isEFLAGSDefLive(MI) && MI.getDesc().getNumDefs() == 1 &&
- MI.getOperand(0).isReg() &&
+ MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual() &&
canHardenRegister(MI.getOperand(0).getReg()) &&
!HardenedAddrRegs.count(BaseReg) &&
!HardenedAddrRegs.count(IndexReg)) {
diff --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
index 0c47fcddc43af2..83eceda8743223 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -1161,3 +1161,34 @@ define void @idempotent_atomic(ptr %x) speculative_load_hardening {
%tmp = atomicrmw or ptr %x, i32 0 seq_cst
ret void
}
+
+; Make sure we don't crash on longjmps (PR60081).
+declare void @llvm.eh.sjlj.longjmp(ptr)
+define void @test_physical_register_defs(ptr %env) speculative_load_hardening {
+; X64-LABEL: test_physical_register_defs:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbp
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: .cfi_offset %rbp, -16
+; X64-NEXT: movq %rsp, %rax
+; X64-NEXT: movq $-1, %rcx
+; X64-NEXT: sarq $63, %rax
+; X64-NEXT: orq %rax, %rdi
+; X64-NEXT: movq (%rdi), %rbp
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 16(%rdi), %rsp
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: jmpq *%rcx
+;
+; X64-LFENCE-LABEL: test_physical_register_defs:
+; X64-LFENCE: # %bb.0:
+; X64-LFENCE-NEXT: pushq %rbp
+; X64-LFENCE-NEXT: .cfi_def_cfa_offset 16
+; X64-LFENCE-NEXT: .cfi_offset %rbp, -16
+; X64-LFENCE-NEXT: movq (%rdi), %rbp
+; X64-LFENCE-NEXT: movq 8(%rdi), %rax
+; X64-LFENCE-NEXT: movq 16(%rdi), %rsp
+; X64-LFENCE-NEXT: jmpq *%rax
+ call void @llvm.eh.sjlj.longjmp(ptr %env)
+ unreachable
+}
More information about the llvm-commits
mailing list