[llvm-branch-commits] [clang] Add pointer field protection feature. (PR #133538)
Peter Collingbourne via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Aug 8 11:31:14 PDT 2025
https://github.com/pcc updated https://github.com/llvm/llvm-project/pull/133538
From e816ed160ed53ff8d9d9039b778c41ecad8a7da2 Mon Sep 17 00:00:00 2001
From: Peter Collingbourne <pcc at google.com>
Date: Wed, 6 Aug 2025 17:12:25 -0700
Subject: [PATCH 1/2] Add tests and documentation
Created using spr 1.3.6-beta.1
---
clang/docs/StructureProtection.rst | 32 +++-
clang/docs/index.rst | 1 +
clang/lib/CodeGen/CGCall.cpp | 21 ++-
clang/test/CodeGenCXX/pfp-coerce.cpp | 220 +++++++++++++++++++++++
clang/test/CodeGenCXX/pfp-null-init.cpp | 13 +-
clang/test/CodeGenCXX/pfp-struct-gep.cpp | 29 ++-
6 files changed, 296 insertions(+), 20 deletions(-)
create mode 100644 clang/test/CodeGenCXX/pfp-coerce.cpp
diff --git a/clang/docs/StructureProtection.rst b/clang/docs/StructureProtection.rst
index 06187f270d49a..6db01cc579b8e 100644
--- a/clang/docs/StructureProtection.rst
+++ b/clang/docs/StructureProtection.rst
@@ -9,11 +9,12 @@ Structure Protection
Introduction
============
-Structure protection is an experimental mitigation against use-after-free
-vulnerabilities. For more details, please see the original `RFC
+Structure protection is an *experimental* mitigation against
+use-after-free vulnerabilities. For more information, please see the
+original `RFC
<https://discourse.llvm.org/t/rfc-structure-protection-a-family-of-uaf-mitigation-techniques/85555>`_.
-An independent set of documentation will be added here when the feature
-is promoted to non-experimental.
+An independent set of documentation will be contributed when the feature
+is promoted to stable.
Usage
=====
@@ -24,9 +25,30 @@ To use structure protection, build your program using one of the flags:
field protection with untagged pointers.
- ``-fexperimental-pointer-field-protection=tagged``: Enable pointer
- field protection with heap pointers assumed to be tagged by the allocator:
+ field protection with heap pointers assumed to be tagged by the allocator.
The entire C++ part of the program must be built with a consistent
``-fexperimental-pointer-field-protection`` flag, and the C++ standard
library must also be built with the same flag and statically linked into
the program.
+
+To build libc++ with pointer field protection support, pass the following
+CMake flags:
+
+.. code-block:: console
+
+ "-DRUNTIMES_${triple}_LIBCXXABI_ENABLE_SHARED=OFF" \
+ "-DRUNTIMES_${triple}_LIBCXX_USE_COMPILER_RT=ON" \
+ "-DRUNTIMES_${triple}_LIBCXX_PFP=untagged" \
+ "-DRUNTIMES_${triple}_LIBCXX_ENABLE_SHARED=OFF" \
+ "-DRUNTIMES_${triple}_LIBCXX_TEST_CONFIG=llvm-libc++-static.cfg.in" \
+ "-DRUNTIMES_${triple}_LIBUNWIND_ENABLE_SHARED=OFF" \
+
+where ``${triple}`` is your target triple, such as
+``aarch64-unknown-linux``.
+
+The resulting toolchain may then be used to build programs
+with pointer field protection by passing ``-stdlib=libc++
+-fexperimental-pointer-field-protection=untagged`` at compile time
+and ``-Wl,-Bstatic -lc++ -lc++abi -Wl,-Bdynamic -lm -fuse-ld=lld
+-static-libstdc++`` at link time.
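+
+As an illustrative example (mirroring the structs used in the compiler
+tests), pointer fields in a non-standard-layout class like the
+following are protected:
+
+.. code-block:: c++
+
+  struct Pointer {
+    int *ptr;          // loads and stores of this field are instrumented
+  private:
+    int private_data;  // private member makes the layout non-standard
+  };
+
+Stores to ``Pointer::ptr`` encode the pointer value in memory and loads
+decode it again, so a stale or attacker-controlled field value no
+longer decodes to a usable pointer.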
diff --git a/clang/docs/index.rst b/clang/docs/index.rst
index 4871d05e932ae..e267c66af1e1d 100644
--- a/clang/docs/index.rst
+++ b/clang/docs/index.rst
@@ -47,6 +47,7 @@ Using Clang as a Compiler
LTOVisibility
SafeStack
ShadowCallStack
+ StructureProtection
SourceBasedCodeCoverage
StandardCPlusPlusModules
Modules
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index f7fca42cd7a07..c26d51c28f305 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -3538,6 +3538,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (SrcSize > DstSize) {
Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
}
+
+ // Structures with PFP fields require a coerced store to add any
+ // pointer signatures.
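+ // (Register-passed values hold raw pointers; the signature exists only
+ // in the in-memory representation, so it must be reapplied after the
+ // bytes are copied in.)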
+ if (getContext().hasPFPFields(Ty)) {
+ llvm::Value *Struct = Builder.CreateLoad(Ptr);
+ CreatePFPCoercedStore(Struct, Ty, Ptr, *this);
+ }
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
@@ -5717,15 +5724,25 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else {
uint64_t SrcSize = SrcTypeSize.getFixedValue();
uint64_t DstSize = DstTypeSize.getFixedValue();
+ bool HasPFPFields = getContext().hasPFPFields(I->Ty);
// If the source type is smaller than the destination type of the
// coerce-to logic, copy the source value into a temp alloca the size
// of the destination type to allow loading all of it. The bits past
// the source value are left undef.
- if (SrcSize < DstSize) {
+ if (HasPFPFields || SrcSize < DstSize) {
Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
Src.getName() + ".coerce");
- Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
+ if (HasPFPFields) {
+ // Structures with PFP fields require a coerced load to remove any
+ // pointer signatures.
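+ // (The signature is part of the in-memory representation only; the
+ // value handed off in registers must be a raw pointer.)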
+ Builder.CreateStore(
+ CreatePFPCoercedLoad(Src, I->Ty, ArgInfo.getCoerceToType(),
+ *this),
+ TempAlloca);
+ } else {
+ Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
+ }
Src = TempAlloca;
} else {
Src = Src.withElementType(STy);
diff --git a/clang/test/CodeGenCXX/pfp-coerce.cpp b/clang/test/CodeGenCXX/pfp-coerce.cpp
new file mode 100644
index 0000000000000..db53f2bf45c70
--- /dev/null
+++ b/clang/test/CodeGenCXX/pfp-coerce.cpp
@@ -0,0 +1,220 @@
+// RUN: %clang_cc1 -triple aarch64-linux -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,AARCH64 %s
+// RUN: %clang_cc1 -triple x86_64-linux -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,X86_64 %s
+
+// Non-standard layout. Pointer fields are signed and discriminated by type.
+struct Pointer {
+ int* ptr;
+private:
+ int private_data;
+};
+
+void pass_pointer_callee(Pointer p);
+
+// CHECK: define dso_local void @_Z12pass_pointerP7Pointer(
+void pass_pointer(Pointer *pp) {
+ // CHECK: %0 = load ptr, ptr %pp.addr, align 8
+ // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %agg.tmp, ptr align 8 %0, i64 16, i1 false)
+ // CHECK: %1 = getelementptr inbounds i8, ptr %agg.tmp, i64 0
+
+ // AARCH64: %2 = call ptr @llvm.protected.field.ptr(ptr %1, i64 36403, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS7Pointer.ptr) ]
+ // X86_64: %2 = call ptr @llvm.protected.field.ptr(ptr %1, i64 51, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS7Pointer.ptr) ]
+
+ // CHECK: %3 = load ptr, ptr %2, align 8
+
+ // AARCH64: %4 = ptrtoint ptr %3 to i64
+ // AARCH64: %5 = insertvalue [2 x i64] poison, i64 %4, 0
+ // AARCH64: %6 = getelementptr inbounds i8, ptr %agg.tmp, i64 8
+ // AARCH64: %7 = load i64, ptr %6, align 8
+ // AARCH64: %8 = insertvalue [2 x i64] %5, i64 %7, 1
+ // AARCH64: call void @_Z19pass_pointer_callee7Pointer([2 x i64] %8)
+
+ // X86_64: %4 = insertvalue { ptr, i32 } poison, ptr %3, 0
+ // X86_64: %5 = getelementptr inbounds i8, ptr %agg.tmp, i64 8
+ // X86_64: %6 = load i32, ptr %5, align 8
+ // X86_64: %7 = insertvalue { ptr, i32 } %4, i32 %6, 1
+ // X86_64: store { ptr, i32 } %7, ptr %agg.tmp.coerce, align 8
+ // X86_64: %8 = getelementptr inbounds nuw { ptr, i32 }, ptr %agg.tmp.coerce, i32 0, i32 0
+ // X86_64: %9 = load ptr, ptr %8, align 8
+ // X86_64: %10 = getelementptr inbounds nuw { ptr, i32 }, ptr %agg.tmp.coerce, i32 0, i32 1
+ // X86_64: %11 = load i32, ptr %10, align 8
+ // X86_64: call void @_Z19pass_pointer_callee7Pointer(ptr %9, i32 %11)
+ pass_pointer_callee(*pp);
+}
+
+// AARCH64: define dso_local void @_Z14passed_pointer7PointerPS_([2 x i64] %p.coerce, ptr noundef %pp)
+// X86_64: define dso_local void @_Z14passed_pointer7PointerPS_(ptr %p.coerce0, i32 %p.coerce1, ptr noundef %pp)
+void passed_pointer(Pointer p, Pointer *pp) {
+ // AARCH64: %p = alloca %struct.Pointer, align 8
+ // AARCH64: %pp.addr = alloca ptr, align 8
+ // AARCH64: %0 = extractvalue [2 x i64] %p.coerce, 0
+ // AARCH64: %1 = getelementptr inbounds i8, ptr %p, i64 0
+ // AARCH64: %2 = call ptr @llvm.protected.field.ptr(ptr %1, i64 36403, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS7Pointer.ptr) ]
+ // AARCH64: %3 = inttoptr i64 %0 to ptr
+ // AARCH64: store ptr %3, ptr %2, align 8
+ // AARCH64: %4 = extractvalue [2 x i64] %p.coerce, 1
+ // AARCH64: %5 = getelementptr inbounds i8, ptr %p, i64 8
+ // AARCH64: store i64 %4, ptr %5, align 8
+ // AARCH64: store ptr %pp, ptr %pp.addr, align 8
+ // AARCH64: %6 = load ptr, ptr %pp.addr, align 8
+ // AARCH64: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %6, ptr align 8 %p, i64 12, i1 false)
+
+ // X86_64: %p = alloca %struct.Pointer, align 8
+ // X86_64: %pp.addr = alloca ptr, align 8
+ // X86_64: %0 = getelementptr inbounds nuw { ptr, i32 }, ptr %p, i32 0, i32 0
+ // X86_64: store ptr %p.coerce0, ptr %0, align 8
+ // X86_64: %1 = getelementptr inbounds nuw { ptr, i32 }, ptr %p, i32 0, i32 1
+ // X86_64: store i32 %p.coerce1, ptr %1, align 8
+ // X86_64: %2 = load %struct.Pointer, ptr %p, align 8
+ // X86_64: %3 = extractvalue %struct.Pointer %2, 0
+ // X86_64: %4 = getelementptr inbounds i8, ptr %p, i64 0
+ // X86_64: %5 = call ptr @llvm.protected.field.ptr(ptr %4, i64 51, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS7Pointer.ptr) ]
+ // X86_64: store ptr %3, ptr %5, align 8
+ // X86_64: %6 = extractvalue %struct.Pointer %2, 1
+ // X86_64: %7 = getelementptr inbounds i8, ptr %p, i64 8
+ // X86_64: store i32 %6, ptr %7, align 8
+ // X86_64: %8 = extractvalue %struct.Pointer %2, 2
+ // X86_64: %9 = getelementptr inbounds i8, ptr %p, i64 12
+ // X86_64: store [4 x i8] %8, ptr %9, align 4
+ // X86_64: store ptr %pp, ptr %pp.addr, align 8
+ // X86_64: %10 = load ptr, ptr %pp.addr, align 8
+ // X86_64: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %10, ptr align 8 %p, i64 12, i1 false)
+ *pp = p;
+}
+
+// AARCH64: define dso_local [2 x i64] @_Z14return_pointerP7Pointer(ptr noundef %pp)
+// X86_64: define dso_local { ptr, i32 } @_Z14return_pointerP7Pointer(ptr noundef %pp)
+Pointer return_pointer(Pointer *pp) {
+ // AARCH64: %retval = alloca %struct.Pointer, align 8
+ // AARCH64: %pp.addr = alloca ptr, align 8
+ // AARCH64: store ptr %pp, ptr %pp.addr, align 8
+ // AARCH64: %0 = load ptr, ptr %pp.addr, align 8
+ // AARCH64: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %retval, ptr align 8 %0, i64 16, i1 false)
+ // AARCH64: %1 = getelementptr inbounds i8, ptr %retval, i64 0
+ // AARCH64: %2 = call ptr @llvm.protected.field.ptr(ptr %1, i64 36403, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS7Pointer.ptr) ]
+ // AARCH64: %3 = load ptr, ptr %2, align 8
+ // AARCH64: %4 = ptrtoint ptr %3 to i64
+ // AARCH64: %5 = insertvalue [2 x i64] poison, i64 %4, 0
+ // AARCH64: %6 = getelementptr inbounds i8, ptr %retval, i64 8
+ // AARCH64: %7 = load i64, ptr %6, align 8
+ // AARCH64: %8 = insertvalue [2 x i64] %5, i64 %7, 1
+ // AARCH64: ret [2 x i64] %8
+
+ // X86_64: %retval = alloca %struct.Pointer, align 8
+ // X86_64: %pp.addr = alloca ptr, align 8
+ // X86_64: store ptr %pp, ptr %pp.addr, align 8
+ // X86_64: %0 = load ptr, ptr %pp.addr, align 8
+ // X86_64: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %retval, ptr align 8 %0, i64 16, i1 false)
+ // X86_64: %1 = getelementptr inbounds i8, ptr %retval, i64 0
+ // X86_64: %2 = call ptr @llvm.protected.field.ptr(ptr %1, i64 51, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS7Pointer.ptr) ]
+ // X86_64: %3 = load ptr, ptr %2, align 8
+ // X86_64: %4 = insertvalue { ptr, i32 } poison, ptr %3, 0
+ // X86_64: %5 = getelementptr inbounds i8, ptr %retval, i64 8
+ // X86_64: %6 = load i32, ptr %5, align 8
+ // X86_64: %7 = insertvalue { ptr, i32 } %4, i32 %6, 1
+ // X86_64: ret { ptr, i32 } %7
+ return *pp;
+}
+
+Pointer returned_pointer_callee();
+
+// CHECK: define dso_local void @_Z16returned_pointerP7Pointer(ptr noundef %pp)
+void returned_pointer(Pointer *pp) {
+ // AARCH64: %pp.addr = alloca ptr, align 8
+ // AARCH64: %ref.tmp = alloca %struct.Pointer, align 8
+ // AARCH64: store ptr %pp, ptr %pp.addr, align 8
+ // AARCH64: %call = call [2 x i64] @_Z23returned_pointer_calleev()
+ // AARCH64: %0 = extractvalue [2 x i64] %call, 0
+ // AARCH64: %1 = getelementptr inbounds i8, ptr %ref.tmp, i64 0
+ // AARCH64: %2 = call ptr @llvm.protected.field.ptr(ptr %1, i64 36403, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS7Pointer.ptr) ]
+ // AARCH64: %3 = inttoptr i64 %0 to ptr
+ // AARCH64: store ptr %3, ptr %2, align 8
+ // AARCH64: %4 = extractvalue [2 x i64] %call, 1
+ // AARCH64: %5 = getelementptr inbounds i8, ptr %ref.tmp, i64 8
+ // AARCH64: store i64 %4, ptr %5, align 8
+ // AARCH64: %6 = load ptr, ptr %pp.addr, align 8
+ // AARCH64: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %6, ptr align 8 %ref.tmp, i64 12, i1 false)
+
+ // X86_64: %pp.addr = alloca ptr, align 8
+ // X86_64: %ref.tmp = alloca %struct.Pointer, align 8
+ // X86_64: store ptr %pp, ptr %pp.addr, align 8
+ // X86_64: %call = call { ptr, i32 } @_Z23returned_pointer_calleev()
+ // X86_64: %0 = extractvalue { ptr, i32 } %call, 0
+ // X86_64: %1 = getelementptr inbounds i8, ptr %ref.tmp, i64 0
+ // X86_64: %2 = call ptr @llvm.protected.field.ptr(ptr %1, i64 51, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS7Pointer.ptr) ]
+ // X86_64: store ptr %0, ptr %2, align 8
+ // X86_64: %3 = extractvalue { ptr, i32 } %call, 1
+ // X86_64: %4 = getelementptr inbounds i8, ptr %ref.tmp, i64 8
+ // X86_64: store i32 %3, ptr %4, align 8
+ // X86_64: %5 = load ptr, ptr %pp.addr, align 8
+ // X86_64: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %5, ptr align 8 %ref.tmp, i64 12, i1 false)
+ *pp = returned_pointer_callee();
+}
+
+class __force_nonstandard_layout_base1 {};
+class __force_nonstandard_layout_base2 : __force_nonstandard_layout_base1 {};
+class __force_nonstandard_layout : __force_nonstandard_layout_base1, __force_nonstandard_layout_base2 {};
+
+// Non-standard layout, non-trivially destructible.
+// Pointer fields are signed and discriminated by address.
+// Trivial ABI: passed and returned by value despite being non-trivial.
+struct [[clang::trivial_abi]] TrivialAbiPointer : __force_nonstandard_layout {
+ int *ptr;
+ ~TrivialAbiPointer();
+};
+
+// AARCH64: define dso_local void @_Z24pass_trivial_abi_pointer17TrivialAbiPointerPS_(i64 %p.coerce, ptr noundef %pp)
+// X86_64: define dso_local void @_Z24pass_trivial_abi_pointer17TrivialAbiPointerPS_(ptr %p.coerce, ptr noundef %pp)
+void pass_trivial_abi_pointer(TrivialAbiPointer p, TrivialAbiPointer *pp) {
+ // AARCH64: %p = alloca %struct.TrivialAbiPointer, align 8
+ // AARCH64: %pp.addr = alloca ptr, align 8
+ // AARCH64: %coerce.dive = getelementptr inbounds nuw %struct.TrivialAbiPointer, ptr %p, i32 0, i32 0
+ // AARCH64: %0 = inttoptr i64 %p.coerce to ptr
+ // AARCH64: %1 = getelementptr inbounds i8, ptr %coerce.dive, i64 0
+ // AARCH64: %2 = ptrtoint ptr %coerce.dive to i64
+ // AARCH64: %3 = call ptr @llvm.protected.field.ptr(ptr %1, i64 %2, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS17TrivialAbiPointer.ptr) ]
+ // AARCH64: store ptr %0, ptr %3, align 8
+ // AARCH64: store ptr %pp, ptr %pp.addr, align 8
+ // AARCH64: %4 = load ptr, ptr %pp.addr, align 8
+ // AARCH64: %call = call noundef nonnull align 8 dereferenceable(8) ptr @_ZN17TrivialAbiPointeraSERKS_(ptr noundef nonnull align 8 dereferenceable(8) %4, ptr noundef nonnull align 8 dereferenceable(8) %p)
+ // AARCH64: call void @_ZN17TrivialAbiPointerD1Ev(ptr noundef nonnull align 8 dereferenceable(8) %p)
+
+ // X86_64: %p = alloca %struct.TrivialAbiPointer, align 8
+ // X86_64: %pp.addr = alloca ptr, align 8
+ // X86_64: %coerce.dive = getelementptr inbounds nuw %struct.TrivialAbiPointer, ptr %p, i32 0, i32 0
+ // X86_64: %0 = getelementptr inbounds i8, ptr %coerce.dive, i64 0
+ // X86_64: %1 = call ptr @llvm.protected.field.ptr(ptr %0, i64 33, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS17TrivialAbiPointer.ptr) ]
+ // X86_64: store ptr %p.coerce, ptr %1, align 8
+ // X86_64: store ptr %pp, ptr %pp.addr, align 8
+ // X86_64: %2 = load ptr, ptr %pp.addr, align 8
+ // X86_64: %call = call noundef nonnull align 8 dereferenceable(8) ptr @_ZN17TrivialAbiPointeraSERKS_(ptr noundef nonnull align 8 dereferenceable(8) %2, ptr noundef nonnull align 8 dereferenceable(8) %p)
+ // X86_64: call void @_ZN17TrivialAbiPointerD1Ev(ptr noundef nonnull align 8 dereferenceable(8) %p)
+ *pp = p;
+}
+
+// AARCH64: define dso_local i64 @_Z26return_trivial_abi_pointerP17TrivialAbiPointer(ptr noundef %pp)
+// X86_64: define dso_local ptr @_Z26return_trivial_abi_pointerP17TrivialAbiPointer(ptr noundef %pp)
+TrivialAbiPointer return_trivial_abi_pointer(TrivialAbiPointer *pp) {
+ // AARCH64: %retval = alloca %struct.TrivialAbiPointer, align 8
+ // AARCH64: %pp.addr = alloca ptr, align 8
+ // AARCH64: store ptr %pp, ptr %pp.addr, align 8
+ // AARCH64: %0 = load ptr, ptr %pp.addr, align 8
+ // AARCH64: call void @_ZN17TrivialAbiPointerC1ERKS_(ptr noundef nonnull align 8 dereferenceable(8) %retval, ptr noundef nonnull align 8 dereferenceable(8) %0)
+ // AARCH64: %1 = getelementptr inbounds i8, ptr %retval, i64 0
+ // AARCH64: %2 = ptrtoint ptr %retval to i64
+ // AARCH64: %3 = call ptr @llvm.protected.field.ptr(ptr %1, i64 %2, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS17TrivialAbiPointer.ptr) ]
+ // AARCH64: %4 = load ptr, ptr %3, align 8
+ // AARCH64: %5 = ptrtoint ptr %4 to i64
+ // AARCH64: ret i64 %5
+
+ // X86_64: %retval = alloca %struct.TrivialAbiPointer, align 8
+ // X86_64: %pp.addr = alloca ptr, align 8
+ // X86_64: store ptr %pp, ptr %pp.addr, align 8
+ // X86_64: %0 = load ptr, ptr %pp.addr, align 8
+ // X86_64: call void @_ZN17TrivialAbiPointerC1ERKS_(ptr noundef nonnull align 8 dereferenceable(8) %retval, ptr noundef nonnull align 8 dereferenceable(8) %0)
+ // X86_64: %1 = getelementptr inbounds i8, ptr %retval, i64 0
+ // X86_64: %2 = call ptr @llvm.protected.field.ptr(ptr %1, i64 33, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS17TrivialAbiPointer.ptr) ]
+ // X86_64: %3 = load ptr, ptr %2, align 8
+ // X86_64: ret ptr %3
+ return *pp;
+}
+
diff --git a/clang/test/CodeGenCXX/pfp-null-init.cpp b/clang/test/CodeGenCXX/pfp-null-init.cpp
index 94c1224f1be13..f0a9152c63ae7 100644
--- a/clang/test/CodeGenCXX/pfp-null-init.cpp
+++ b/clang/test/CodeGenCXX/pfp-null-init.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-linux -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,AARCH64 %s
+// RUN: %clang_cc1 -triple x86_64-linux -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck --check-prefixes=CHECK,X86_64 %s
struct S {
void *p;
@@ -8,9 +9,13 @@ struct S {
// CHECK-LABEL: null_init
void null_init() {
+ // Check that null initialization was correctly applied to the pointer field.
+ // CHECK: %s = alloca %struct.S, align 8
+ // CHECK: call void @llvm.memset.p0.i64(ptr align 8 %s, i8 0, i64 16, i1 false)
+ // CHECK: %0 = getelementptr inbounds i8, ptr %s, i64 0
+ // AARCH64: %1 = call ptr @llvm.protected.field.ptr(ptr %0, i64 29832, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.p) ]
+ // X86_64: %1 = call ptr @llvm.protected.field.ptr(ptr %0, i64 136, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.p) ]
+ // CHECK: store ptr null, ptr %1, align 8
S s{};
}
-// Check that the constructor was applied
-// CHECK: call void @llvm.memset.{{.*}}
-// CHECK: call {{.*}} @llvm.protected.field.ptr({{.*}}, i64 0, metadata !"_ZTS1S.p", i1 false)
\ No newline at end of file
diff --git a/clang/test/CodeGenCXX/pfp-struct-gep.cpp b/clang/test/CodeGenCXX/pfp-struct-gep.cpp
index 964545efa9f4b..700934baf8420 100644
--- a/clang/test/CodeGenCXX/pfp-struct-gep.cpp
+++ b/clang/test/CodeGenCXX/pfp-struct-gep.cpp
@@ -1,6 +1,5 @@
-// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK-NOPFP
-// RUN: %clang_cc1 -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck %s -check-prefix=CHECK-PFP
-
+// RUN: %clang_cc1 -triple aarch64-linux -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck %s -check-prefixes=CHECK,AARCH64
+// RUN: %clang_cc1 -triple x86_64-linux -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck %s -check-prefixes=CHECK,X86_64
struct S {
int* ptr;
@@ -10,16 +9,28 @@ struct S {
// CHECK-LABEL: load_pointers
int* load_pointers(S *t) {
+ // CHECK: %t.addr = alloca ptr, align 8
+ // CHECK: store ptr %t, ptr %t.addr, align 8
+ // CHECK: %0 = load ptr, ptr %t.addr, align 8
+ // CHECK: %ptr = getelementptr inbounds nuw %struct.S, ptr %0, i32 0, i32 0
+ // AARCH64: %1 = call ptr @llvm.protected.field.ptr(ptr %ptr, i64 63261, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.ptr) ]
+ // X86_64: %1 = call ptr @llvm.protected.field.ptr(ptr %ptr, i64 29, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.ptr) ]
+ // CHECK: %2 = load ptr, ptr %1, align 8
+ // CHECK: ret ptr %2
return t->ptr;
}
-// CHECK-PFP: call {{.*}} @llvm.protected.field.ptr({{.*}}, i64 0, metadata !"_ZTS1S.ptr", i1 false)
-// CHECK-NOPFP: getelementptr
// CHECK-LABEL: store_pointers
void store_pointers(S* t, int* p) {
+ // CHECK: %t.addr = alloca ptr, align 8
+ // CHECK: %p.addr = alloca ptr, align 8
+ // CHECK: store ptr %t, ptr %t.addr, align 8
+ // CHECK: store ptr %p, ptr %p.addr, align 8
+ // CHECK: %0 = load ptr, ptr %p.addr, align 8
+ // CHECK: %1 = load ptr, ptr %t.addr, align 8
+ // CHECK: %ptr = getelementptr inbounds nuw %struct.S, ptr %1, i32 0, i32 0
+ // AARCH64: %2 = call ptr @llvm.protected.field.ptr(ptr %ptr, i64 63261, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.ptr) ]
+ // X86_64: %2 = call ptr @llvm.protected.field.ptr(ptr %ptr, i64 29, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.ptr) ]
+ // CHECK: store ptr %0, ptr %2, align 8
t->ptr = p;
}
-// CHECK-PFP: call {{.*}} @llvm.protected.field.ptr({{.*}}, i64 0, metadata !"_ZTS1S.ptr", i1 false)
-// CHECK-NOPFP: getelementptr
-
-
From 0866b8f821c126e562f7599a0b4c5f05908549a8 Mon Sep 17 00:00:00 2001
From: Peter Collingbourne <pcc at google.com>
Date: Fri, 8 Aug 2025 11:30:58 -0700
Subject: [PATCH 2/2] Improve coerce logic and add coerce test
Created using spr 1.3.6-beta.1
---
clang/include/clang/AST/ASTContext.h | 4 +-
clang/lib/AST/ASTContext.cpp | 15 ++-
clang/lib/CodeGen/CGBuiltin.cpp | 52 ++++++---
clang/lib/CodeGen/CGExpr.cpp | 1 +
clang/lib/Sema/SemaTypeTraits.cpp | 20 ++--
clang/test/CodeGenCXX/pfp-coerce.cpp | 4 +-
clang/test/CodeGenCXX/pfp-memcpy.cpp | 4 +-
.../CodeGenCXX/pfp-trivially-relocatable.cpp | 101 ++++++++++++++++++
8 files changed, 167 insertions(+), 34 deletions(-)
create mode 100644 clang/test/CodeGenCXX/pfp-trivially-relocatable.cpp
diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
index 7c04a3923f066..b1864ca4eccce 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -186,6 +186,7 @@ struct TypeInfoChars {
struct PFPField {
CharUnits offset;
FieldDecl *field;
+ bool isWithinUnion;
};
/// Holds long-lived AST nodes (such as types and decls) that can be
@@ -3727,7 +3728,8 @@ OPT_LIST(V)
bool isPFPStruct(const RecordDecl *rec) const;
void findPFPFields(QualType Ty, CharUnits Offset,
- std::vector<PFPField> &Fields, bool IncludeVBases) const;
+ std::vector<PFPField> &Fields, bool IncludeVBases,
+ bool IsWithinUnion = false) const;
bool hasPFPFields(QualType ty) const;
bool isPFPField(const FieldDecl *field) const;
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 1dc5b6bdcafdc..5c9d481406bb6 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -15183,6 +15183,10 @@ bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl,
}
bool ASTContext::arePFPFieldsTriviallyRelocatable(const RecordDecl *RD) const {
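+ // Only AArch64 signs PFP fields with an address discriminator; on other
+ // targets the in-memory encoding is position-independent and survives a
+ // plain memory copy.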
+ bool IsPAuthSupported =
+ getTargetInfo().getTriple().getArch() == llvm::Triple::aarch64;
+ if (!IsPAuthSupported)
+ return true;
if (getLangOpts().getPointerFieldProtection() ==
LangOptions::PointerFieldProtectionKind::Tagged)
return !isa<CXXRecordDecl>(RD) ||
@@ -15200,7 +15204,7 @@ bool ASTContext::isPFPStruct(const RecordDecl *rec) const {
void ASTContext::findPFPFields(QualType Ty, CharUnits Offset,
std::vector<PFPField> &Fields,
- bool IncludeVBases) const {
+ bool IncludeVBases, bool IsWithinUnion) const {
if (auto *AT = getAsConstantArrayType(Ty)) {
if (auto *ElemDecl = AT->getElementType()->getAsCXXRecordDecl()) {
const ASTRecordLayout &ElemRL = getASTRecordLayout(ElemDecl);
@@ -15213,26 +15217,27 @@ void ASTContext::findPFPFields(QualType Ty, CharUnits Offset,
auto *Decl = Ty->getAsCXXRecordDecl();
if (!Decl)
return;
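+ // Once we descend into a union, every PFP field found below it shares
+ // storage with the union's other members, so record that in the result.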
+ IsWithinUnion |= Decl->isUnion();
const ASTRecordLayout &RL = getASTRecordLayout(Decl);
for (FieldDecl *field : Decl->fields()) {
CharUnits fieldOffset =
Offset + toCharUnitsFromBits(RL.getFieldOffset(field->getFieldIndex()));
if (isPFPField(field))
- Fields.push_back({fieldOffset, field});
- findPFPFields(field->getType(), fieldOffset, Fields, true);
+ Fields.push_back({fieldOffset, field, IsWithinUnion});
+ findPFPFields(field->getType(), fieldOffset, Fields, true, IsWithinUnion);
}
for (auto &Base : Decl->bases()) {
if (Base.isVirtual())
continue;
CharUnits BaseOffset =
Offset + RL.getBaseClassOffset(Base.getType()->getAsCXXRecordDecl());
- findPFPFields(Base.getType(), BaseOffset, Fields, false);
+ findPFPFields(Base.getType(), BaseOffset, Fields, false, IsWithinUnion);
}
if (IncludeVBases) {
for (auto &Base : Decl->vbases()) {
CharUnits BaseOffset =
Offset + RL.getVBaseClassOffset(Base.getType()->getAsCXXRecordDecl());
- findPFPFields(Base.getType(), BaseOffset, Fields, false);
+ findPFPFields(Base.getType(), BaseOffset, Fields, false, IsWithinUnion);
}
}
}
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index d11726f99afd9..7be70ee46eaf5 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -4499,14 +4499,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
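+ // Size in bytes of one element of the relocated array, used both to
+ // scale the element count and to step the per-record fixup loop below.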
+ Value *TypeSize = ConstantInt::get(
+ SizeVal->getType(),
+ getContext()
+ .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
+ .getQuantity());
if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
- SizeVal = Builder.CreateMul(
- SizeVal,
- ConstantInt::get(
- SizeVal->getType(),
- getContext()
- .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
- .getQuantity()));
+ SizeVal = Builder.CreateMul(SizeVal, TypeSize);
EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
@@ -4515,13 +4514,38 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
std::vector<PFPField> PFPFields;
getContext().findPFPFields(E->getArg(0)->getType()->getPointeeType(),
CharUnits::Zero(), PFPFields, true);
- for (auto &Field : PFPFields) {
- if (getContext().arePFPFieldsTriviallyRelocatable(
- Field.field->getParent()))
- continue;
- auto DestFieldPtr = EmitAddressOfPFPField(Dest, Field);
- auto SrcFieldPtr = EmitAddressOfPFPField(Src, Field);
- Builder.CreateStore(Builder.CreateLoad(SrcFieldPtr), DestFieldPtr);
+ if (!PFPFields.empty()) {
+ BasicBlock *Entry = Builder.GetInsertBlock();
+ BasicBlock *Loop = createBasicBlock("loop");
+ BasicBlock *LoopEnd = createBasicBlock("loop.end");
+ Builder.CreateCondBr(
+ Builder.CreateICmpEQ(SizeVal,
+ ConstantInt::get(SizeVal->getType(), 0)),
+ LoopEnd, Loop);
+
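+ // Walk the relocated range one record at a time, re-signing the PFP
+ // fields of the record at the current byte offset.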
+ EmitBlock(Loop);
+ PHINode *Offset = Builder.CreatePHI(SizeVal->getType(), 2);
+ Offset->addIncoming(ConstantInt::get(SizeVal->getType(), 0), Entry);
+ Address DestRec = Dest.withPointer(
+ Builder.CreateInBoundsGEP(Int8Ty, Dest.getBasePointer(), {Offset}),
+ KnownNonNull);
+ Address SrcRec = Src.withPointer(
+ Builder.CreateInBoundsGEP(Int8Ty, Src.getBasePointer(), {Offset}),
+ KnownNonNull);
+ for (auto &Field : PFPFields) {
+ if (getContext().arePFPFieldsTriviallyRelocatable(
+ Field.field->getParent()))
+ continue;
+ auto DestFieldPtr = EmitAddressOfPFPField(DestRec, Field);
+ auto SrcFieldPtr = EmitAddressOfPFPField(SrcRec, Field);
+ Builder.CreateStore(Builder.CreateLoad(SrcFieldPtr), DestFieldPtr);
+ }
+
+ Value *NextOffset = Builder.CreateAdd(Offset, TypeSize);
+ Offset->addIncoming(NextOffset, Loop);
+ Builder.CreateCondBr(Builder.CreateICmpEQ(NextOffset, SizeVal), LoopEnd,
+                      Loop);
+
+ EmitBlock(LoopEnd);
}
}
return RValue::get(Dest, *this);
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 3457ec0ee8b5d..cd86818a13674 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -5112,6 +5112,7 @@ static Address emitRawAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
return emitAddrOfZeroSizeField(CGF, base, field, IsInBounds);
const RecordDecl *rec = field->getParent();
+
unsigned idx =
CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
diff --git a/clang/lib/Sema/SemaTypeTraits.cpp b/clang/lib/Sema/SemaTypeTraits.cpp
index c06f53018b338..c171b708b818c 100644
--- a/clang/lib/Sema/SemaTypeTraits.cpp
+++ b/clang/lib/Sema/SemaTypeTraits.cpp
@@ -235,16 +235,16 @@ static bool IsEligibleForReplacement(Sema &SemaRef, const CXXRecordDecl *D) {
static bool IsImplementationDefinedNonRelocatable(Sema &SemaRef,
const CXXRecordDecl *D) {
- // FIXME: Should also check for polymorphic union members here if PAuth ABI is
- // enabled.
-
- // FIXME: PFP should not affect trivial relocatability except in cases where a
- // PFP field is a member of a union, instead it should affect the
- // implementation of std::trivially_relocate. See:
- // https://discourse.llvm.org/t/rfc-structure-protection-a-family-of-uaf-mitigation-techniques/85555/16?u=pcc
- if (!SemaRef.Context.arePFPFieldsTriviallyRelocatable(D) &&
- SemaRef.Context.hasPFPFields(QualType(D->getTypeForDecl(), 0)))
- return true;
+ // The implementation-defined carveout only exists for polymorphic types.
+ if (!D->isPolymorphic())
+ return false;
+
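+ // Only a PFP field whose storage lives inside a union triggers the
+ // carveout. Illustrative example (not from the test suite):
+ //   struct Q { int *p; private: int x; };          // Q::p is a PFP field
+ //   struct P { virtual ~P(); union { Q q; } u; };  // carveout applies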
+ std::vector<PFPField> PFPFields;
+ SemaRef.Context.findPFPFields(QualType(D->getTypeForDecl(), 0),
+                               CharUnits::Zero(), PFPFields, true);
+ for (const PFPField &Field : PFPFields)
+   if (Field.isWithinUnion)
+     return true;
return false;
}
diff --git a/clang/test/CodeGenCXX/pfp-coerce.cpp b/clang/test/CodeGenCXX/pfp-coerce.cpp
index db53f2bf45c70..636d61ccf4858 100644
--- a/clang/test/CodeGenCXX/pfp-coerce.cpp
+++ b/clang/test/CodeGenCXX/pfp-coerce.cpp
@@ -186,7 +186,7 @@ void pass_trivial_abi_pointer(TrivialAbiPointer p, TrivialAbiPointer *pp) {
// X86_64: store ptr %p.coerce, ptr %1, align 8
// X86_64: store ptr %pp, ptr %pp.addr, align 8
// X86_64: %2 = load ptr, ptr %pp.addr, align 8
- // X86_64: %call = call noundef nonnull align 8 dereferenceable(8) ptr @_ZN17TrivialAbiPointeraSERKS_(ptr noundef nonnull align 8 dereferenceable(8) %2, ptr noundef nonnull align 8 dereferenceable(8) %p)
+ // X86_64: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %2, ptr align 8 %p, i64 8, i1 false)
// X86_64: call void @_ZN17TrivialAbiPointerD1Ev(ptr noundef nonnull align 8 dereferenceable(8) %p)
*pp = p;
}
@@ -210,7 +210,7 @@ TrivialAbiPointer return_trivial_abi_pointer(TrivialAbiPointer *pp) {
// X86_64: %pp.addr = alloca ptr, align 8
// X86_64: store ptr %pp, ptr %pp.addr, align 8
// X86_64: %0 = load ptr, ptr %pp.addr, align 8
- // X86_64: call void @_ZN17TrivialAbiPointerC1ERKS_(ptr noundef nonnull align 8 dereferenceable(8) %retval, ptr noundef nonnull align 8 dereferenceable(8) %0)
+ // X86_64: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %retval, ptr align 8 %0, i64 8, i1 false)
// X86_64: %1 = getelementptr inbounds i8, ptr %retval, i64 0
// X86_64: %2 = call ptr @llvm.protected.field.ptr(ptr %1, i64 33, i1 false) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS17TrivialAbiPointer.ptr) ]
// X86_64: %3 = load ptr, ptr %2, align 8
diff --git a/clang/test/CodeGenCXX/pfp-memcpy.cpp b/clang/test/CodeGenCXX/pfp-memcpy.cpp
index 3e92db6110ae3..9df63556d71ff 100644
--- a/clang/test/CodeGenCXX/pfp-memcpy.cpp
+++ b/clang/test/CodeGenCXX/pfp-memcpy.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-linux -fexperimental-pointer-field-protection=tagged -emit-llvm -o - %s | FileCheck %s
struct ClassWithTrivialCopy {
ClassWithTrivialCopy();
@@ -16,4 +16,4 @@ void make_trivial_copy(ClassWithTrivialCopy *s1, ClassWithTrivialCopy *s2) {
// CHECK-LABEL: define{{.*}} void @_Z17make_trivial_copyP20ClassWithTrivialCopyS0_
// CHECK-NOT: memcpy
-// CHECK: ret void
\ No newline at end of file
+// CHECK: ret void
diff --git a/clang/test/CodeGenCXX/pfp-trivially-relocatable.cpp b/clang/test/CodeGenCXX/pfp-trivially-relocatable.cpp
new file mode 100644
index 0000000000000..e540d2672c84d
--- /dev/null
+++ b/clang/test/CodeGenCXX/pfp-trivially-relocatable.cpp
@@ -0,0 +1,101 @@
+// RUN: %clang_cc1 -std=c++26 -triple x86_64-linux-gnu -emit-llvm -fexperimental-pointer-field-protection=untagged -o - %s | FileCheck --check-prefixes=CHECK,RELOC %s
+// RUN: %clang_cc1 -std=c++26 -triple x86_64-linux-gnu -emit-llvm -fexperimental-pointer-field-protection=tagged -o - %s | FileCheck --check-prefixes=CHECK,RELOC %s
+// RUN: %clang_cc1 -std=c++26 -triple aarch64-linux-gnu -emit-llvm -fexperimental-pointer-field-protection=untagged -o - %s | FileCheck --check-prefixes=CHECK,RELOC %s
+// RUN: %clang_cc1 -std=c++26 -triple aarch64-linux-gnu -emit-llvm -fexperimental-pointer-field-protection=tagged -o - %s | FileCheck --check-prefixes=CHECK,NONRELOC %s
+
+typedef __SIZE_TYPE__ size_t;
+
+struct S trivially_relocatable_if_eligible {
+ S(const S&);
+ ~S();
+ int* a;
+private:
+ int* b;
+};
+
+// CHECK: define dso_local void @_Z5test1P1SS0_(
+void test1(S* source, S* dest) {
+ // RELOC: %0 = load ptr, ptr %dest.addr, align 8
+ // RELOC-NEXT: %1 = load ptr, ptr %source.addr, align 8
+ // RELOC-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 8 %0, ptr align 8 %1, i64 16, i1 false)
+ // RELOC-NOT: @llvm.protected.field.ptr
+
+ // NONRELOC: %0 = load ptr, ptr %dest.addr, align 8
+ // NONRELOC-NEXT: %1 = load ptr, ptr %source.addr, align 8
+ // NONRELOC-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 8 %0, ptr align 8 %1, i64 16, i1 false)
+ // NONRELOC-NEXT: br i1 false, label %loop.end, label %loop
+
+ // NONRELOC: loop:
+ // NONRELOC-NEXT: %2 = phi i64 [ 0, %entry ], [ %19, %loop ]
+ // NONRELOC-NEXT: %3 = getelementptr inbounds i8, ptr %0, i64 %2
+ // NONRELOC-NEXT: %4 = getelementptr inbounds i8, ptr %1, i64 %2
+ // NONRELOC-NEXT: %5 = getelementptr inbounds i8, ptr %3, i64 0
+ // NONRELOC-NEXT: %6 = ptrtoint ptr %3 to i64
+ // NONRELOC-NEXT: %7 = call ptr @llvm.protected.field.ptr(ptr %5, i64 %6, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.a) ]
+ // NONRELOC-NEXT: %8 = getelementptr inbounds i8, ptr %4, i64 0
+ // NONRELOC-NEXT: %9 = ptrtoint ptr %4 to i64
+ // NONRELOC-NEXT: %10 = call ptr @llvm.protected.field.ptr(ptr %8, i64 %9, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.a) ]
+ // NONRELOC-NEXT: %11 = load ptr, ptr %10, align 8
+ // NONRELOC-NEXT: store ptr %11, ptr %7, align 8
+ // NONRELOC-NEXT: %12 = getelementptr inbounds i8, ptr %3, i64 8
+ // NONRELOC-NEXT: %13 = ptrtoint ptr %3 to i64
+ // NONRELOC-NEXT: %14 = call ptr @llvm.protected.field.ptr(ptr %12, i64 %13, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.b) ]
+ // NONRELOC-NEXT: %15 = getelementptr inbounds i8, ptr %4, i64 8
+ // NONRELOC-NEXT: %16 = ptrtoint ptr %4 to i64
+ // NONRELOC-NEXT: %17 = call ptr @llvm.protected.field.ptr(ptr %15, i64 %16, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.b) ]
+ // NONRELOC-NEXT: %18 = load ptr, ptr %17, align 8
+ // NONRELOC-NEXT: store ptr %18, ptr %14, align 8
+ // NONRELOC-NEXT: %19 = add i64 %2, 16
+ // NONRELOC-NEXT: %20 = icmp eq i64 %19, 16
+ // NONRELOC-NEXT: br i1 %20, label %loop.end, label %loop
+
+ // NONRELOC: loop.end:
+ // NONRELOC-NEXT: ret void
+ __builtin_trivially_relocate(dest, source, 1);
+}
+
+// CHECK: define dso_local void @_Z5testNP1SS0_m(
+void testN(S* source, S* dest, size_t count) {
+ // RELOC: %0 = load ptr, ptr %dest.addr, align 8
+ // RELOC-NEXT: %1 = load ptr, ptr %source.addr, align 8
+ // RELOC-NEXT: %2 = load i64, ptr %count.addr, align 8
+ // RELOC-NEXT: %3 = mul i64 %2, 16
+ // RELOC-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 8 %0, ptr align 8 %1, i64 %3, i1 false)
+ // RELOC-NOT: @llvm.protected.field.ptr
+
+ // NONRELOC: %0 = load ptr, ptr %dest.addr, align 8
+ // NONRELOC-NEXT: %1 = load ptr, ptr %source.addr, align 8
+ // NONRELOC-NEXT: %2 = load i64, ptr %count.addr, align 8
+ // NONRELOC-NEXT: %3 = mul i64 %2, 16
+ // NONRELOC-NEXT: call void @llvm.memmove.p0.p0.i64(ptr align 8 %0, ptr align 8 %1, i64 %3, i1 false)
+ // NONRELOC-NEXT: %4 = icmp eq i64 %3, 0
+ // NONRELOC-NEXT: br i1 %4, label %loop.end, label %loop
+
+ // NONRELOC: loop:
+ // NONRELOC-NEXT: %5 = phi i64 [ 0, %entry ], [ %22, %loop ]
+ // NONRELOC-NEXT: %6 = getelementptr inbounds i8, ptr %0, i64 %5
+ // NONRELOC-NEXT: %7 = getelementptr inbounds i8, ptr %1, i64 %5
+ // NONRELOC-NEXT: %8 = getelementptr inbounds i8, ptr %6, i64 0
+ // NONRELOC-NEXT: %9 = ptrtoint ptr %6 to i64
+ // NONRELOC-NEXT: %10 = call ptr @llvm.protected.field.ptr(ptr %8, i64 %9, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.a) ]
+ // NONRELOC-NEXT: %11 = getelementptr inbounds i8, ptr %7, i64 0
+ // NONRELOC-NEXT: %12 = ptrtoint ptr %7 to i64
+ // NONRELOC-NEXT: %13 = call ptr @llvm.protected.field.ptr(ptr %11, i64 %12, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.a) ]
+ // NONRELOC-NEXT: %14 = load ptr, ptr %13, align 8
+ // NONRELOC-NEXT: store ptr %14, ptr %10, align 8
+ // NONRELOC-NEXT: %15 = getelementptr inbounds i8, ptr %6, i64 8
+ // NONRELOC-NEXT: %16 = ptrtoint ptr %6 to i64
+ // NONRELOC-NEXT: %17 = call ptr @llvm.protected.field.ptr(ptr %15, i64 %16, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.b) ]
+ // NONRELOC-NEXT: %18 = getelementptr inbounds i8, ptr %7, i64 8
+ // NONRELOC-NEXT: %19 = ptrtoint ptr %7 to i64
+ // NONRELOC-NEXT: %20 = call ptr @llvm.protected.field.ptr(ptr %18, i64 %19, i1 true) [ "deactivation-symbol"(ptr @__pfp_ds__ZTS1S.b) ]
+ // NONRELOC-NEXT: %21 = load ptr, ptr %20, align 8
+ // NONRELOC-NEXT: store ptr %21, ptr %17, align 8
+ // NONRELOC-NEXT: %22 = add i64 %5, 16
+ // NONRELOC-NEXT: %23 = icmp eq i64 %22, %3
+ // NONRELOC-NEXT: br i1 %23, label %loop.end, label %loop
+
+ // NONRELOC: loop.end:
+ // NONRELOC-NEXT: ret void
+ __builtin_trivially_relocate(dest, source, count);
+}