[flang-commits] [flang] [llvm] [llvm] Improve llvm.objectsize computation by computing GEP, alloca a… (PR #117849)
via flang-commits
flang-commits at lists.llvm.org
Tue Nov 26 22:16:45 PST 2024
https://github.com/serge-sans-paille created https://github.com/llvm/llvm-project/pull/117849
…nd malloc parameters bound
Using a naive expression walker, it is possible to compute valuable information for allocation functions, GEP and alloca, even in the presence of some dynamic information.
We don't rely on computeConstantRange to avoid taking advantage of undefined behavior, which would be counter-productive wrt. usual llvm.objectsize usage.
llvm.objectsize plays an important role in _FORTIFY_SOURCE definitions, so improving its diagnostic in turn improves the security of compiled applications.
As a side note, as a result of recent optimization improvements, clang no longer passes the tests in https://github.com/serge-sans-paille/builtin_object_size-test-suite. This commit restores the situation and greatly improves the scope of code handled by the static version of __builtin_object_size.
This is a recommit of https://github.com/llvm/llvm-project/pull/115522 with fix applied.
>From 2f1082babfc8d4a234883c0a6157c2ebcb820155 Mon Sep 17 00:00:00 2001
From: serge-sans-paille <sguelton at mozilla.com>
Date: Tue, 12 Nov 2024 20:44:22 +0100
Subject: [PATCH] [llvm] Improve llvm.objectsize computation by computing GEP,
alloca and malloc parameters bound
Using a naive expression walker, it is possible to compute valuable information for
allocation functions, GEP and alloca, even in the presence of some dynamic
information.
We don't rely on computeConstantRange to avoid taking advantage of
undefined behavior, which would be counter-productive wrt. usual
llvm.objectsize usage.
llvm.objectsize plays an important role in _FORTIFY_SOURCE definitions,
so improving its diagnostic in turn improves the security of compiled
applications.
As a side note, as a result of recent optimization improvements, clang no
longer passes the tests in https://github.com/serge-sans-paille/builtin_object_size-test-suite.
This commit restores the situation and greatly improves the scope of
code handled by the static version of __builtin_object_size.
---
flang/include/flang/Runtime/descriptor.h | 1 +
flang/runtime/io-api-minimal.cpp | 2 +-
llvm/include/llvm/IR/Value.h | 12 +-
llvm/lib/Analysis/MemoryBuiltins.cpp | 107 ++++++++++++++++-
.../builtin-object-size-range.ll | 109 ++++++++++++++++++
5 files changed, 222 insertions(+), 9 deletions(-)
create mode 100644 llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
diff --git a/flang/include/flang/Runtime/descriptor.h b/flang/include/flang/Runtime/descriptor.h
index 030d0c1031fbaa..d674a8e2229a04 100644
--- a/flang/include/flang/Runtime/descriptor.h
+++ b/flang/include/flang/Runtime/descriptor.h
@@ -467,6 +467,7 @@ class alignas(Descriptor) StaticDescriptor {
RT_OFFLOAD_VAR_GROUP_END
RT_API_ATTRS Descriptor &descriptor() {
+
return *reinterpret_cast<Descriptor *>(storage_);
}
RT_API_ATTRS const Descriptor &descriptor() const {
diff --git a/flang/runtime/io-api-minimal.cpp b/flang/runtime/io-api-minimal.cpp
index ad76fe3de0324c..a06d3d12f8f005 100644
--- a/flang/runtime/io-api-minimal.cpp
+++ b/flang/runtime/io-api-minimal.cpp
@@ -150,7 +150,7 @@ bool IODEF(OutputLogical)(Cookie cookie, bool truth) {
// Provide own definition for `std::__libcpp_verbose_abort` to avoid dependency
// on the version provided by libc++.
-void std::__libcpp_verbose_abort(char const *format, ...) {
+void std::__libcpp_verbose_abort(char const *format, ...) noexcept {
va_list list;
va_start(list, format);
std::vfprintf(stderr, format, list);
diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
index 945081b77e9536..d444a768a65436 100644
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -723,12 +723,16 @@ class Value {
bool AllowInvariantGroup = false,
function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
nullptr) const;
- Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
- bool AllowNonInbounds,
- bool AllowInvariantGroup = false) {
+
+ Value *stripAndAccumulateConstantOffsets(
+ const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+ bool AllowInvariantGroup = false,
+ function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
+ nullptr) {
return const_cast<Value *>(
static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
- DL, Offset, AllowNonInbounds, AllowInvariantGroup));
+ DL, Offset, AllowNonInbounds, AllowInvariantGroup,
+ ExternalAnalysis));
}
/// This is a wrapper around stripAndAccumulateConstantOffsets with the
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 4028d5f4e2e1b8..93dfdfadff11be 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -670,6 +670,68 @@ STATISTIC(ObjectVisitorArgument,
STATISTIC(ObjectVisitorLoad,
"Number of load instructions with unsolved size and offset");
+static std::optional<APInt>
+combinePossibleConstantValues(std::optional<APInt> LHS,
+ std::optional<APInt> RHS,
+ ObjectSizeOpts::Mode EvalMode) {
+ if (!LHS || !RHS)
+ return std::nullopt;
+ if (EvalMode == ObjectSizeOpts::Mode::Max)
+ return LHS->sge(*RHS) ? *LHS : *RHS;
+ else
+ return LHS->sle(*RHS) ? *LHS : *RHS;
+}
+
+static std::optional<APInt> aggregatePossibleConstantValuesImpl(
+ const Value *V, ObjectSizeOpts::Mode EvalMode, unsigned recursionDepth) {
+ constexpr unsigned maxRecursionDepth = 4;
+ if (recursionDepth == maxRecursionDepth)
+ return std::nullopt;
+
+ if (const auto *CI = dyn_cast<ConstantInt>(V)) {
+ return CI->getValue();
+ }
+
+ else if (const auto *SI = dyn_cast<SelectInst>(V)) {
+ return combinePossibleConstantValues(
+ aggregatePossibleConstantValuesImpl(SI->getTrueValue(), EvalMode,
+ recursionDepth + 1),
+ aggregatePossibleConstantValuesImpl(SI->getFalseValue(), EvalMode,
+ recursionDepth + 1),
+ EvalMode);
+ }
+ else if (const auto *PN = dyn_cast<PHINode>(V)) {
+ unsigned Count = PN->getNumIncomingValues();
+ if (Count == 0)
+ return std::nullopt;
+ auto Acc = aggregatePossibleConstantValuesImpl(
+ PN->getIncomingValue(0), EvalMode, recursionDepth + 1);
+ for (unsigned I = 1; Acc && I < Count; ++I) {
+ auto Tmp = aggregatePossibleConstantValuesImpl(
+ PN->getIncomingValue(I), EvalMode, recursionDepth + 1);
+ Acc = combinePossibleConstantValues(Acc, Tmp, EvalMode);
+ }
+ return Acc;
+ }
+
+ return std::nullopt;
+}
+
+static std::optional<APInt>
+aggregatePossibleConstantValues(const Value *V, ObjectSizeOpts::Mode EvalMode) {
+ if (auto *CI = dyn_cast<ConstantInt>(V))
+ return CI->getValue();
+
+ if (EvalMode != ObjectSizeOpts::Mode::Min &&
+ EvalMode != ObjectSizeOpts::Mode::Max)
+ return std::nullopt;
+
+ // Not using computeConstantRange here because we cannot guarantee it's not
+ // doing optimization based on UB which we want to avoid when expanding
+ // __builtin_object_size.
+ return aggregatePossibleConstantValuesImpl(V, EvalMode, 0u);
+}
+
/// Align \p Size according to \p Alignment. If \p Size is greater than
/// getSignedMaxValue(), set it as unknown as we can only represent signed value
/// in OffsetSpan.
@@ -717,11 +779,36 @@ OffsetSpan ObjectSizeOffsetVisitor::computeImpl(Value *V) {
V = V->stripAndAccumulateConstantOffsets(
DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);
+ // Give it another try with approximated analysis. We don't start with this
+ // one because stripAndAccumulateConstantOffsets behaves differently wrt.
+ // overflows if we provide an external Analysis.
+ if ((Options.EvalMode == ObjectSizeOpts::Mode::Min ||
+ Options.EvalMode == ObjectSizeOpts::Mode::Max) &&
+ isa<GEPOperator>(V)) {
+ // External Analysis used to compute the Min/Max value of individual Offsets
+ // within a GEP.
+ ObjectSizeOpts::Mode EvalMode =
+ Options.EvalMode == ObjectSizeOpts::Mode::Min
+ ? ObjectSizeOpts::Mode::Max
+ : ObjectSizeOpts::Mode::Min;
+ auto OffsetRangeAnalysis = [EvalMode](Value &VOffset, APInt &Offset) {
+ if (auto PossibleOffset =
+ aggregatePossibleConstantValues(&VOffset, EvalMode)) {
+ Offset = *PossibleOffset;
+ return true;
+ }
+ return false;
+ };
+
+ V = V->stripAndAccumulateConstantOffsets(
+ DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true,
+ /*ExternalAnalysis=*/OffsetRangeAnalysis);
+ }
+
// Later we use the index type size and zero but it will match the type of the
// value that is passed to computeImpl.
IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
Zero = APInt::getZero(IntTyBits);
-
OffsetSpan ORT = computeValue(V);
bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
@@ -813,8 +900,9 @@ OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
return OffsetSpan(Zero, align(Size, I.getAlign()));
Value *ArraySize = I.getArraySize();
- if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
- APInt NumElems = C->getValue();
+ if (auto PossibleSize =
+ aggregatePossibleConstantValues(ArraySize, Options.EvalMode)) {
+ APInt NumElems = *PossibleSize;
if (!CheckedZextOrTrunc(NumElems))
return ObjectSizeOffsetVisitor::unknown();
@@ -840,7 +928,18 @@ OffsetSpan ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
}
OffsetSpan ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
- if (std::optional<APInt> Size = getAllocSize(&CB, TLI)) {
+ auto Mapper = [this](const Value *V) -> const Value * {
+ if (!V->getType()->isIntegerTy())
+ return V;
+
+ if (auto PossibleBound =
+ aggregatePossibleConstantValues(V, Options.EvalMode))
+ return ConstantInt::get(V->getType(), *PossibleBound);
+
+ return V;
+ };
+
+ if (std::optional<APInt> Size = getAllocSize(&CB, TLI, Mapper)) {
// Very large unsigned value cannot be represented as OffsetSpan.
if (Size->isNegative())
return ObjectSizeOffsetVisitor::unknown();
diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
new file mode 100644
index 00000000000000..f84ebee1442893
--- /dev/null
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=lower-constant-intrinsics -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg)
+declare noalias ptr @malloc(i64 noundef) #0
+
+define i64 @select_alloc_size(i1 %cond) {
+; CHECK-LABEL: @select_alloc_size(
+; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 [[SIZE]], align 1
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %size = select i1 %cond, i64 3, i64 4
+ %ptr = alloca i8, i64 %size
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_malloc_size(i1 %cond) {
+; CHECK-LABEL: @select_malloc_size(
+; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
+; CHECK-NEXT: [[PTR:%.*]] = call noalias ptr @malloc(i64 noundef [[SIZE]])
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %size = select i1 %cond, i64 3, i64 4
+ %ptr = call noalias ptr @malloc(i64 noundef %size)
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_gep_offset(i1 %cond) {
+; CHECK-LABEL: @select_gep_offset(
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
+; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[OFFSET]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 7, i64 6
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %ptr = alloca i8, i64 10
+ %offset = select i1 %cond, i64 3, i64 4
+ %ptr.slide = getelementptr inbounds i8, ptr %ptr, i64 %offset
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_gep_neg_offset(i1 %c0, i1 %c1) {
+; CHECK-LABEL: @select_gep_neg_offset(
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
+; CHECK-NEXT: [[PTR_SLIDE_1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 5
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 -3, i64 -4
+; CHECK-NEXT: [[PTR_SLIDE_2:%.*]] = getelementptr inbounds i8, ptr [[PTR_SLIDE_1]], i64 [[OFFSET]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[C1:%.*]], i64 9, i64 8
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %ptr = alloca i8, i64 10
+ %ptr.slide.1 = getelementptr inbounds i8, ptr %ptr, i64 5
+ %offset = select i1 %c0, i64 -3, i64 -4
+ %ptr.slide.2 = getelementptr inbounds i8, ptr %ptr.slide.1, i64 %offset
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 true, i1 true, i1 false)
+ %res = select i1 %c1, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_neg_oob_offset(i1 %c0, i1 %c1) {
+; CHECK-LABEL: @select_neg_oob_offset(
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[C0:%.*]], i64 -3, i64 -4
+; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[OFFSET]]
+; CHECK-NEXT: ret i64 0
+;
+ %ptr = alloca i8, i64 10
+ %offset = select i1 %c0, i64 -3, i64 -4
+ %ptr.slide = getelementptr inbounds i8, ptr %ptr, i64 %offset
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
+ %res = select i1 %c1, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_gep_offsets(i1 %cond) {
+; CHECK-LABEL: @select_gep_offsets(
+; CHECK-NEXT: [[PTR:%.*]] = alloca [10 x i8], i64 2, align 1
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i32 0, i32 1
+; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds [10 x i8], ptr [[PTR]], i32 [[OFFSET]], i32 5
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 15, i64 5
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %ptr = alloca [10 x i8], i64 2
+ %offset = select i1 %cond, i32 0, i32 1
+ %ptr.slide = getelementptr inbounds [10 x i8], ptr %ptr, i32 %offset, i32 5
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+attributes #0 = { nounwind allocsize(0) }
More information about the flang-commits
mailing list