[llvm] 49138d9 - [X86] Fix SLH crash on llvm.eh.sjlj.longjmp (#77959)
via llvm-commits
llvm-commits at lists.llvm.org
Sat Jan 13 20:03:22 PST 2024
Author: Nicholas Mosier
Date: 2024-01-14T12:03:18+08:00
New Revision: 49138d97c0d8a6d1c6935da414a1f3fea839263b
URL: https://github.com/llvm/llvm-project/commit/49138d97c0d8a6d1c6935da414a1f3fea839263b
DIFF: https://github.com/llvm/llvm-project/commit/49138d97c0d8a6d1c6935da414a1f3fea839263b.diff
LOG: [X86] Fix SLH crash on llvm.eh.sjlj.longjmp (#77959)
Fix #60081.
Added:
Modified:
llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
llvm/test/CodeGen/X86/speculative-load-hardening.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 6301285fe95457..cff071c4f24b37 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1840,7 +1840,7 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
// just bail. Also check that its register class is one of the ones we
// can harden.
Register UseDefReg = UseMI.getOperand(0).getReg();
- if (!UseDefReg.isVirtual() || !canHardenRegister(UseDefReg))
+ if (!canHardenRegister(UseDefReg))
return {};
SingleUseMI = &UseMI;
@@ -1863,6 +1863,10 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
}
bool X86SpeculativeLoadHardeningPass::canHardenRegister(Register Reg) {
+ // We only support hardening virtual registers.
+ if (!Reg.isVirtual())
+ return false;
+
auto *RC = MRI->getRegClass(Reg);
int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
if (RegBytes > 8)
@@ -1909,7 +1913,6 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
const DebugLoc &Loc) {
assert(canHardenRegister(Reg) && "Cannot harden this register!");
- assert(Reg.isVirtual() && "Cannot harden a physical register!");
auto *RC = MRI->getRegClass(Reg);
int Bytes = TRI->getRegSizeInBits(*RC) / 8;
diff --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
index 0c47fcddc43af2..45da777ea4e2a2 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -1161,3 +1161,34 @@ define void @idempotent_atomic(ptr %x) speculative_load_hardening {
%tmp = atomicrmw or ptr %x, i32 0 seq_cst
ret void
}
+
+; Make sure we don't crash on longjmps (PR60081).
+declare void @llvm.eh.sjlj.longjmp(ptr)
+define void @test_longjmp(ptr %env) speculative_load_hardening {
+; X64-LABEL: test_longjmp:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbp
+; X64-NEXT: .cfi_def_cfa_offset 16
+; X64-NEXT: .cfi_offset %rbp, -16
+; X64-NEXT: movq %rsp, %rax
+; X64-NEXT: movq $-1, %rcx
+; X64-NEXT: sarq $63, %rax
+; X64-NEXT: orq %rax, %rdi
+; X64-NEXT: movq (%rdi), %rbp
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 16(%rdi), %rsp
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: jmpq *%rcx
+;
+; X64-LFENCE-LABEL: test_longjmp:
+; X64-LFENCE: # %bb.0:
+; X64-LFENCE-NEXT: pushq %rbp
+; X64-LFENCE-NEXT: .cfi_def_cfa_offset 16
+; X64-LFENCE-NEXT: .cfi_offset %rbp, -16
+; X64-LFENCE-NEXT: movq (%rdi), %rbp
+; X64-LFENCE-NEXT: movq 8(%rdi), %rax
+; X64-LFENCE-NEXT: movq 16(%rdi), %rsp
+; X64-LFENCE-NEXT: jmpq *%rax
+ call void @llvm.eh.sjlj.longjmp(ptr %env)
+ unreachable
+}
More information about the llvm-commits
mailing list