[llvm] [asan][x86] Abort instrumenting memintrinsics that target fs, gs (PR #129291)

Thor Preimesberger via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 13 12:20:42 PDT 2025


https://github.com/cheezeburglar updated https://github.com/llvm/llvm-project/pull/129291

>From 76146ce749d0aac3c4af38980ab372cc54b295a9 Mon Sep 17 00:00:00 2001
From: Thor Preimesberger <ThorP at protonmail.com>
Date: Tue, 25 Feb 2025 05:32:18 -0600
Subject: [PATCH] [asan][x86] Skip memintrinsics that write to special address
 spaces on x86-64 (#129291)

Currently, AddressSanitizer's instrumented memintrinsics are unable to write
to the address spaces indicated by the segment registers fs and gs on x86-64.
This patch skips the instrumentation of such memintrinsics.

Fixes #124238 for asan.
---
 .../Instrumentation/AddressSanitizer.cpp      | 12 +++
 .../AddressSanitizer/X86/bug_124238.ll        | 88 +++++++++++++++++++
 2 files changed, 100 insertions(+)
 create mode 100644 llvm/test/Instrumentation/AddressSanitizer/X86/bug_124238.ll

diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 8d8d56035a48f..ab82f6f8fe020 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -797,6 +797,7 @@ struct AddressSanitizer {
                                  bool IsWrite, size_t AccessSizeIndex,
                                  Value *SizeArgument, uint32_t Exp,
                                  RuntimeCallInserter &RTCI);
+  bool maybeIgnoreMemIntrinsic(MemIntrinsic *MI, const Triple &TargetTriple);
   void instrumentMemIntrinsic(MemIntrinsic *MI, RuntimeCallInserter &RTCI);
   Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
   bool suppressInstrumentationSiteForDebug(int &Instrumented);
@@ -1340,10 +1341,21 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
     return IRB.CreateAdd(Shadow, ShadowBase);
 }
 
+// Returns true if instrumentation of this memintrinsic must be skipped.
+// On x86-64, destinations in address spaces 256 and 257 (the fs/gs
+// segment-relative address spaces) cannot be handled by the asan runtime
+// memcpy/memset/memmove replacements, so instrumenting them miscompiles.
+bool AddressSanitizer::maybeIgnoreMemIntrinsic(MemIntrinsic *MI,
+                                               const Triple &TargetTriple) {
+  if ((MI->getDestAddressSpace() == 256 || MI->getDestAddressSpace() == 257) &&
+      TargetTriple.getArch() == Triple::x86_64)
+    return true;
+  return false;
+}
+
 // Instrument memset/memmove/memcpy
 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI,
                                               RuntimeCallInserter &RTCI) {
   InstrumentationIRBuilder IRB(MI);
+  if (maybeIgnoreMemIntrinsic(MI, TargetTriple))
+    return;
   if (isa<MemTransferInst>(MI)) {
     RTCI.createRuntimeCall(
         IRB, isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
diff --git a/llvm/test/Instrumentation/AddressSanitizer/X86/bug_124238.ll b/llvm/test/Instrumentation/AddressSanitizer/X86/bug_124238.ll
new file mode 100644
index 0000000000000..90ce8ffac1750
--- /dev/null
+++ b/llvm/test/Instrumentation/AddressSanitizer/X86/bug_124238.ll
@@ -0,0 +1,88 @@
+; RUN: opt -passes=asan %s -S | FileCheck %s
+
+;; Check that AddressSanitizer::instrumentMemIntrinsic skips MemIntrinsics
+;; that write to unsupported (segment-relative) address spaces on x86-64
+;; PR124238: https://www.github.com/llvm/llvm-project/issues/124238
+
+target triple = "x86_64-unknown-linux-gnu"
+
+$.str.658906a285b7a0f82dabd9915e07848c = comdat any
+ at .str = internal constant { [2 x i8], [30 x i8] } { [2 x i8] c"x\00", [30 x i8] zeroinitializer }, comdat($.str.658906a285b7a0f82dabd9915e07848c), align 32
+ at 0 = private alias { [2 x i8], [30 x i8] }, ptr @.str
+
+define void @test_memcpy(i64 noundef %addr) sanitize_address #0 {
+entry:
+  %addr.addr = alloca i64, align 8
+  store i64 %addr, ptr %addr.addr, align 8
+  %0 = load i64, ptr %addr.addr, align 8
+  %1 = inttoptr i64 %0 to ptr addrspace(257)
+  call void @llvm.memcpy.p257.p0.i64(ptr addrspace(257) align 1 %1, ptr align 1 @.str, i64 1, i1 false)
+; CHECK: llvm.memcpy
+  %2 = load i64, ptr %addr.addr, align 8
+  %3 = inttoptr i64 %2 to ptr addrspace(256)
+  call void @llvm.memcpy.p256.p0.i64(ptr addrspace(256) align 1 %3, ptr align 1 @.str, i64 1, i1 false)
+; CHECK: llvm.memcpy
+  %4 = load i64, ptr %addr.addr, align 8
+  %5 = inttoptr i64 %4 to ptr addrspace(258)
+  call void @llvm.memcpy.p258.p0.i64(ptr addrspace(258) align 1 %5, ptr align 1 @.str, i64 1, i1 false)
+; CHECK: __asan_memcpy
+  %6 = load i64, ptr %addr.addr, align 8
+  %7 = inttoptr i64 %6 to ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr align 1 %7, ptr align 1 @.str, i64 1, i1 false)
+; CHECK: __asan_memcpy
+  ret void
+}
+
+define void @test_memset(i64 noundef %addr) sanitize_address #0 {
+entry:
+  %addr.addr = alloca i64, align 8
+  store i64 %addr, ptr %addr.addr, align 8
+  %0 = load i64, ptr %addr.addr, align 8
+  %1 = inttoptr i64 %0 to ptr addrspace(257)
+  call void @llvm.memset.p257.i64(ptr addrspace(257) align 1 %1, i8 0, i64 1, i1 false)
+; CHECK: llvm.memset
+  %2 = load i64, ptr %addr.addr, align 8
+  %3 = inttoptr i64 %2 to ptr addrspace(256)
+  call void @llvm.memset.p256.i64(ptr addrspace(256) align 1 %3, i8 0, i64 1, i1 false)
+; CHECK: llvm.memset
+  %4 = load i64, ptr %addr.addr, align 8
+  %5 = inttoptr i64 %4 to ptr addrspace(258)
+  call void @llvm.memset.p258.i64(ptr addrspace(258) align 1 %5, i8 0, i64 1, i1 false)
+; CHECK: __asan_memset
+  %6 = load i64, ptr %addr.addr, align 8
+  %7 = inttoptr i64 %6 to ptr
+  call void @llvm.memset.p0.i64(ptr align 1 %7, i8 0, i64 1, i1 false)
+; CHECK: __asan_memset
+  ret void
+}
+
+define void @test_memmove(i64 noundef %addr) sanitize_address #0 {
+entry:
+  %addr.addr = alloca i64, align 8
+  store i64 %addr, ptr %addr.addr, align 8
+  %0 = load i64, ptr %addr.addr, align 8
+  %1 = inttoptr i64 %0 to ptr addrspace(257)
+  %2 = load i64, ptr %addr.addr, align 8
+  %3 = inttoptr i64 %2 to ptr
+  call void @llvm.memmove.p257.p0.i64(ptr addrspace(257) align 1 %1, ptr align 1 %3, i64 1, i1 false)
+; CHECK: llvm.memmove
+  %4 = load i64, ptr %addr.addr, align 8
+  %5 = inttoptr i64 %4 to ptr addrspace(256)
+  %6 = load i64, ptr %addr.addr, align 8
+  %7 = inttoptr i64 %6 to ptr
+  call void @llvm.memmove.p256.p0.i64(ptr addrspace(256) align 1 %5, ptr align 1 %7, i64 1, i1 false)
+; CHECK: llvm.memmove
+  %8 = load i64, ptr %addr.addr, align 8
+  %9 = inttoptr i64 %8 to ptr addrspace(258)
+  %10 = load i64, ptr %addr.addr, align 8
+  %11 = inttoptr i64 %10 to ptr
+  call void @llvm.memmove.p258.p0.i64(ptr addrspace(258) align 1 %9, ptr align 1 %11, i64 1, i1 false)
+; CHECK: __asan_memmove
+  %12 = load i64, ptr %addr.addr, align 8
+  %13 = inttoptr i64 %12 to ptr
+  %14 = load i64, ptr %addr.addr, align 8
+  %15 = inttoptr i64 %14 to ptr
+  call void @llvm.memmove.p0.p0.i64(ptr align 1 %13, ptr align 1 %15, i64 1, i1 false)
+; CHECK: __asan_memmove
+  ret void
+}



More information about the llvm-commits mailing list