[libc-commits] [libc] [libc] fortify jmp buffer for x86-64 (PR #112769)
Nick Desaulniers via libc-commits
libc-commits at lists.llvm.org
Wed Nov 13 09:06:23 PST 2024
================
@@ -0,0 +1,147 @@
+//===-- Common macros for jmpbuf -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_SRC_SETJMP_X86_64_COMMON_H
+#define LIBC_SRC_SETJMP_X86_64_COMMON_H
+
+#include "include/llvm-libc-macros/offsetof-macro.h"
+
+//===----------------------------------------------------------------------===//
+// Architecture-specific macros for i386 and x86_64.
+//===----------------------------------------------------------------------===//
+
+#ifdef __i386__
+#define RET_REG eax
+#define BASE_REG ecx
+#define MUL_REG edx
+#define STACK_REG esp
+#define PC_REG eip
+#define NORMAL_STORE_REGS ebx, esi, edi, ebp
+#define STORE_ALL_REGS(M) M(ebx) M(esi) M(edi) M(ebp)
+#define LOAD_ALL_REGS(M) M(ebx) M(esi) M(edi) M(ebp) M(esp)
+#define DECLARE_ALL_REGS(M) M(ebx), M(esi), M(edi), M(ebp), M(esp), M(eip)
+#define LOAD_BASE() "mov 4(%%esp), %%ecx\n\t"
+#define CALCULATE_RETURN_VALUE() \
+ "mov 0x8(%%esp), %%eax" \
+ "cmp $0x1, %%eax\n\t" \
+ "adc $0x0, %%eax\n\t"
+#else
+#define RET_REG rax
+#define BASE_REG rdi
+#define MUL_REG rdx
+#define STACK_REG rsp
+#define PC_REG rip
+#define STORE_ALL_REGS(M) M(rbx) M(rbp) M(r12) M(r13) M(r14) M(r15)
+#define LOAD_ALL_REGS(M) M(rbx) M(rbp) M(r12) M(r13) M(r14) M(r15) M(rsp)
+#define DECLARE_ALL_REGS(M) \
+ M(rbx), M(rbp), M(r12), M(r13), M(r14), M(r15), M(rsp), M(rip)
+#define LOAD_BASE()
+#define CALCULATE_RETURN_VALUE() \
+ "cmp $0x1, %%esi\n\t" \
+ "adc $0x0, %%esi\n\t" \
+ "mov %%rsi, %%rax\n\t"
+#endif
+
+//===----------------------------------------------------------------------===//
+// Utility macros.
+//===----------------------------------------------------------------------===//
+
+#define _STR(X) #X
+#define STR(X) _STR(X)
+#define REG(X) "%%" STR(X)
+#define XOR(X, Y) "xor " REG(X) ", " REG(Y) "\n\t"
+#define MOV(X, Y) "mov " REG(X) ", " REG(Y) "\n\t"
+#define STORE(R, OFFSET, BASE) \
+ "mov " REG(R) ", %c[" STR(OFFSET) "](" REG(BASE) ")\n\t"
+#define LOAD(OFFSET, BASE, R) \
+ "mov %c[" STR(OFFSET) "](" REG(BASE) "), " REG(R) "\n\t"
+#define COMPUTE_STACK_TO_RET() \
+ "lea " STR(__SIZEOF_POINTER__) "(" REG(STACK_REG) "), " REG(RET_REG) "\n\t"
+#define COMPUTE_PC_TO_RET() "mov (" REG(STACK_REG) "), " REG(RET_REG) "\n\t"
+#define RETURN() "ret\n\t"
+#define DECLARE_OFFSET(X) [X] "i"(offsetof(__jmp_buf, X))
+#define CMP_MEM_REG(OFFSET, BASE, DST) \
+ "cmp %c[" STR(OFFSET) "](" REG(BASE) "), " REG(DST) "\n\t"
+#define JNE_LABEL(LABEL) "jne " STR(LABEL) "\n\t"
+
+//===----------------------------------------------------------------------===//
+// Checksum related macros.
+//===----------------------------------------------------------------------===//
+// For now, the checksum is computed with a simple multiply-xor-rotation
+// algorithm. The pseudocode is as follows:
+//
+// def checksum(x, acc):
+// masked = x ^ MASK
+// high, low = full_multiply(masked, acc)
+// return rotate(high ^ low, ROTATION)
----------------
nickdesaulniers wrote:
At this point, I'm wondering if we should implement this in C, `call` it from `asm`, and move the result into the correct register. Then we can be more certain that we're doing the same operation on each architecture. For example, I'd like to extend this jump buffer fortification to all other architectures, and it might be nice to do this operation once in C rather than multiple times in inline assembly.
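A minimal sketch of what that shared C helper might look like, following the pseudocode quoted above. The function name `jmpbuf_checksum` and the `MASK`/`ROTATION` values here are placeholders rather than anything from this PR, and the full multiply assumes a 64-bit target with compiler support for `__uint128_t`:

```cpp
#include <stdint.h>

// Placeholder constants; the real mask/rotation values come from the PR's
// checksum scheme, not from this sketch.
constexpr uint64_t MASK = 0x0123456789abcdefULL;
constexpr unsigned ROTATION = 13;

// Rotate left by a fixed, nonzero amount.
static inline uint64_t rotl64(uint64_t v, unsigned r) {
  return (v << r) | (v >> (64 - r));
}

// Multiply-xor-rotate step from the pseudocode: fold one saved register
// value `x` into the running accumulator `acc`.
uint64_t jmpbuf_checksum(uint64_t x, uint64_t acc) {
  uint64_t masked = x ^ MASK;
  __uint128_t product = static_cast<__uint128_t>(masked) * acc;
  uint64_t high = static_cast<uint64_t>(product >> 64);
  uint64_t low = static_cast<uint64_t>(product);
  return rotl64(high ^ low, ROTATION);
}
```

The asm side would then `call` this symbol with the saved register value and the accumulator in the argument registers and move the returned value back into whichever register holds the running checksum, keeping the mixing logic in one place across architectures.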
https://github.com/llvm/llvm-project/pull/112769