[llvm] [llvm] Use computeConstantRange to improve llvm.objectsize computation (PR #115522)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 8 10:14:03 PST 2024
https://github.com/serge-sans-paille updated https://github.com/llvm/llvm-project/pull/115522
>From 22c9145e726b2469e311607256a1c734fcb35ef9 Mon Sep 17 00:00:00 2001
From: serge-sans-paille <sguelton at mozilla.com>
Date: Fri, 8 Nov 2024 12:21:52 +0100
Subject: [PATCH 1/2] [llvm] Fix handling of very large allocation in
llvm.objectsize expansion
The internal structure used to carry intermediate computations holds
signed values. If an object size happens to overflow the signed range, we
can get an invalid result, so make sure this situation never happens.
This is not very limiting, as static allocations of such large values
should rarely happen.
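
As an illustration of the failure mode (a minimal standalone sketch, not
part of the patch): an APInt holding a size above the signed maximum reads
as negative once interpreted as a signed quantity, which is exactly what
the new cap guards against.

  #include "llvm/ADT/APInt.h"
  #include <cassert>

  using namespace llvm;

  int main() {
    // 2^63 is a valid unsigned 64-bit size, but negative when read as signed.
    APInt Size(64, 0x8000000000000000ULL);
    assert(Size.isNegative()); // such sizes are now treated as unknown
    // Sizes at or below the signed maximum remain representable.
    assert(!APInt::getSignedMaxValue(64).isNegative());
  }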
---
llvm/include/llvm/Analysis/MemoryBuiltins.h | 6 +-
llvm/lib/Analysis/MemoryBuiltins.cpp | 24 ++++++--
.../builtin-object-size-phi.ll | 14 +++++
.../objectsize_basic.ll | 56 +++++++++++++++++++
4 files changed, 93 insertions(+), 7 deletions(-)
diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
index c3b11cdf5cf5db..e8f43140564509 100644
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -223,6 +223,10 @@ struct SizeOffsetAPInt : public SizeOffsetType<APInt, SizeOffsetAPInt> {
/// OffsetSpan - Used internally by \p ObjectSizeOffsetVisitor. Represents a
/// point in memory as a pair of allocated bytes before and after it.
+///
+/// The \c Before and \c After fields are signed values. This makes it
+/// possible to represent out-of-bounds accesses, e.g. as a result of a GEP,
+/// at the expense of not being able to represent very large allocations.
struct OffsetSpan {
APInt Before; /// Number of allocated bytes before this point.
APInt After; /// Number of allocated bytes after this point.
@@ -255,7 +259,7 @@ class ObjectSizeOffsetVisitor
SmallDenseMap<Instruction *, OffsetSpan, 8> SeenInsts;
unsigned InstructionsVisited;
- APInt align(APInt Size, MaybeAlign Align);
+ APInt alignAndCap(APInt Size, MaybeAlign Align);
static OffsetSpan unknown() { return OffsetSpan(); }
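
For readers following along, a quick worked example of what an OffsetSpan
encodes (illustrative values, matching the doc comment above):

  #include <cassert>
  #include <cstdint>

  int main() {
    // %obj = alloca i8, i64 10 ; %p = getelementptr i8, ptr %obj, i64 3
    const int64_t Alloc = 10, Offset = 3;
    const int64_t Before = Offset;        // bytes allocated before %p
    const int64_t After = Alloc - Offset; // bytes allocated after %p
    assert(Before == 3 && After == 7);
    // llvm.objectsize(%p, ...) reports After. A negative Before would encode
    // a pointer sitting before the start of its object, hence signed fields.
  }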
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index d9769c48f26560..f5dc1f29db2c4a 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -668,7 +668,13 @@ STATISTIC(ObjectVisitorArgument,
STATISTIC(ObjectVisitorLoad,
"Number of load instructions with unsolved size and offset");
-APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
+/// Align \p Size according to \p Alignment. If \p Size is greater than
+/// getSignedMaxValue(), set it as unknown, as we can only represent signed
+/// values in OffsetSpan.
+APInt ObjectSizeOffsetVisitor::alignAndCap(APInt Size, MaybeAlign Alignment) {
+ if (Size.isNegative())
+ return APInt();
+
if (Options.RoundToAlign && Alignment)
return APInt(IntTyBits, alignTo(Size.getZExtValue(), *Alignment));
return Size;
@@ -780,8 +786,9 @@ OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
if (!isUIntN(IntTyBits, ElemSize.getKnownMinValue()))
return ObjectSizeOffsetVisitor::unknown();
APInt Size(IntTyBits, ElemSize.getKnownMinValue());
+
if (!I.isArrayAllocation())
- return OffsetSpan(Zero, align(Size, I.getAlign()));
+ return OffsetSpan(Zero, alignAndCap(Size, I.getAlign()));
Value *ArraySize = I.getArraySize();
if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
@@ -791,8 +798,9 @@ OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
bool Overflow;
Size = Size.umul_ov(NumElems, Overflow);
+
return Overflow ? ObjectSizeOffsetVisitor::unknown()
- : OffsetSpan(Zero, align(Size, I.getAlign()));
+ : OffsetSpan(Zero, alignAndCap(Size, I.getAlign()));
}
return ObjectSizeOffsetVisitor::unknown();
}
@@ -806,12 +814,16 @@ OffsetSpan ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
}
APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
- return OffsetSpan(Zero, align(Size, A.getParamAlign()));
+ return OffsetSpan(Zero, alignAndCap(Size, A.getParamAlign()));
}
OffsetSpan ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
- if (std::optional<APInt> Size = getAllocSize(&CB, TLI))
+ if (std::optional<APInt> Size = getAllocSize(&CB, TLI)) {
+ // Very large unsigned values cannot be represented as an OffsetSpan.
+ if (Size->isNegative())
+ return ObjectSizeOffsetVisitor::unknown();
return OffsetSpan(Zero, *Size);
+ }
return ObjectSizeOffsetVisitor::unknown();
}
@@ -852,7 +864,7 @@ OffsetSpan ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
return ObjectSizeOffsetVisitor::unknown();
APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
- return OffsetSpan(Zero, align(Size, GV.getAlign()));
+ return OffsetSpan(Zero, alignAndCap(Size, GV.getAlign()));
}
OffsetSpan ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst &) {
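
Summarizing the new behavior outside of the visitor (a simplified
standalone sketch of alignAndCap; identifiers mirror the patch):

  #include "llvm/ADT/APInt.h"
  #include "llvm/Support/Alignment.h"

  using namespace llvm;

  // Simplified restatement: sizes that are negative when read as signed
  // cannot live in an OffsetSpan, so they collapse to the "unknown"
  // sentinel (an empty APInt), exactly like alignAndCap above.
  static APInt alignAndCapSketch(APInt Size, MaybeAlign Alignment,
                                 bool RoundToAlign) {
    if (Size.isNegative()) // i.e. above getSignedMaxValue()
      return APInt();      // unknown
    if (RoundToAlign && Alignment)
      return APInt(Size.getBitWidth(),
                   alignTo(Size.getZExtValue(), *Alignment));
    return Size;
  }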
diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll
index 2974228e6a8303..fbb37560fb2d0e 100644
--- a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-phi.ll
@@ -118,6 +118,20 @@ if.end:
ret i64 %size
}
+define dso_local i64 @pick_max_large(i1 %c) local_unnamed_addr {
+; CHECK-LABEL: @pick_max_large(
+; CHECK-NEXT: [[BUFFER:%.*]] = alloca i8, i64 -7, align 1
+; CHECK-NEXT: [[S:%.*]] = select i1 [[C:%.*]], ptr null, ptr [[BUFFER]]
+; CHECK-NEXT: ret i64 -1
+;
+ %buffer = alloca i8, i64 -7 ; Actually a very large positive integer
+ %s = select i1 %c, ptr null, ptr %buffer
+ %objsize = tail call i64 @llvm.objectsize.i64.p0(ptr %s, i1 false, i1 false, i1 false)
+ ret i64 %objsize
+
+}
+
+
define i64 @pick_negative_offset(i32 %n) {
; CHECK-LABEL: @pick_negative_offset(
; CHECK-NEXT: entry:
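
A side note on the `alloca i8, i64 -7` in pick_max_large above: the size
operand has no sign of its own, so -7 denotes a near-2^64 allocation. A
quick plain-C++ check (illustrative only):

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint64_t Bytes = static_cast<uint64_t>(int64_t{-7});
    // Prints 18446744073709551609, i.e. 2^64 - 7, far above INT64_MAX,
    // which is why the objectsize call now folds to -1 (unknown).
    std::printf("%llu\n", static_cast<unsigned long long>(Bytes));
  }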
diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll b/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll
index 568070a8660698..ae7399a1eafe52 100644
--- a/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/objectsize_basic.ll
@@ -195,6 +195,62 @@ define i64 @out_of_bound_gep() {
ret i64 %objsize
}
+define i64 @wrapping_gep() {
+; CHECK-LABEL: @wrapping_gep(
+; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 4, align 1
+; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
+; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 9223372036854775807
+; CHECK-NEXT: ret i64 0
+;
+ %obj = alloca i8, i64 4
+ %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
+ %slide.bis = getelementptr i8, ptr %slide, i64 9223372036854775807
+ %objsize = call i64 @llvm.objectsize.i64(ptr %slide.bis, i1 false, i1 false, i1 false)
+ ret i64 %objsize
+}
+
+define i64 @wrapping_gep_neg() {
+; CHECK-LABEL: @wrapping_gep_neg(
+; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 9223372036854775807, align 1
+; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
+; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 3
+; CHECK-NEXT: [[SLIDE_TER:%.*]] = getelementptr i8, ptr [[SLIDE_BIS]], i64 -4
+; CHECK-NEXT: ret i64 1
+;
+ %obj = alloca i8, i64 9223372036854775807
+ %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
+ %slide.bis = getelementptr i8, ptr %slide, i64 3
+ %slide.ter = getelementptr i8, ptr %slide.bis, i64 -4
+ %objsize = call i64 @llvm.objectsize.i64(ptr %slide.ter, i1 false, i1 false, i1 false)
+ ret i64 %objsize
+}
+
+; We don't analyze allocations larger than the platform's ptrdiff_t
+define i64 @large_alloca() {
+; CHECK-LABEL: @large_alloca(
+; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 -9223372036854775808, align 1
+; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
+; CHECK-NEXT: ret i64 -1
+;
+ %obj = alloca i8, i64 9223372036854775808
+ %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
+ %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
+ ret i64 %objsize
+}
+
+; We don't analyze allocations larger than the platform's ptrdiff_t
+define i64 @large_malloc() {
+; CHECK-LABEL: @large_malloc(
+; CHECK-NEXT: [[OBJ:%.*]] = call ptr @malloc(i64 -9223372036854775808)
+; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
+; CHECK-NEXT: ret i64 -1
+;
+ %obj = call ptr @malloc(i64 9223372036854775808)
+ %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
+ %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
+ ret i64 %objsize
+}
+
define i64 @out_of_bound_negative_gep() {
; CHECK-LABEL: @out_of_bound_negative_gep(
; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i32 4, align 1
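
The wrapping_gep_neg result above can be checked by hand; the offsets
accumulate with 64-bit wrap-around semantics (illustrative arithmetic only):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t ObjSize = 0x7fffffffffffffffULL; // 2^63 - 1
    uint64_t Off = 0;
    Off += 0x7fffffffffffffffULL; // %slide
    Off += 3;                     // %slide.bis, crosses INT64_MAX
    Off -= 4;                     // %slide.ter
    assert(Off == 0x7ffffffffffffffeULL); // 2^63 - 2
    assert(ObjSize - Off == 1);   // one byte left, matching `ret i64 1`
  }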
>From e6cf87354346f4d4c7e09e399e30d3789108eb03 Mon Sep 17 00:00:00 2001
From: serge-sans-paille <sguelton at mozilla.com>
Date: Sat, 2 Nov 2024 10:02:22 +0100
Subject: [PATCH 2/2] [llvm] Use computeConstantRange to improve
llvm.objectsize computation
Using computeConstantRange, it is possible to compute valuable information for
allocation functions, GEPs, and allocas, even in the presence of some dynamic
information.
llvm.objectsize plays an important role in _FORTIFY_SOURCE definitions,
so improving its precision in turn improves the security of compiled
applications.
As a side note, as a result of recent optimization improvements, clang no
longer passes the test suite at
https://github.com/serge-sans-paille/builtin_object_size-test-suite
This commit restores that situation and greatly widens the scope of
code handled by the static version of __builtin_object_size.
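
The mechanism at its core (a sketch using the computeConstantRange signature
from llvm/Analysis/ValueTracking.h; the helper name is illustrative): when a
size or offset is not constant but has a known range, one of its bounds can
stand in for it, chosen according to whether objectsize was asked for a lower
or an upper bound.

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/ConstantRange.h"
  #include "llvm/IR/Dominators.h"
  #include <optional>

  using namespace llvm;

  // E.g. for %size = select i1 %c, i64 3, i64 4, the range is [3, 5).
  static std::optional<APInt> rangeBound(const Value *V, bool WantMax,
                                         const Instruction *CtxI,
                                         const DominatorTree *DT) {
    ConstantRange CR =
        computeConstantRange(V, /*ForSigned=*/false, /*UseInstrInfo=*/true,
                             /*AC=*/nullptr, CtxI, DT);
    if (CR.isFullSet())
      return std::nullopt; // nothing known about V
    APInt B = WantMax ? CR.getUnsignedMax() : CR.getUnsignedMin();
    // A saturated bound means that side of the range is effectively unknown.
    if ((WantMax && B.isMaxValue()) || (!WantMax && B.isMinValue()))
      return std::nullopt;
    return B;
  }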
---
llvm/include/llvm/Analysis/MemoryBuiltins.h | 11 +-
llvm/include/llvm/IR/Value.h | 12 ++-
llvm/lib/Analysis/MemoryBuiltins.cpp | 101 +++++++++++++++++-
.../InstCombine/InstructionCombining.cpp | 5 +-
.../Scalar/LowerConstantIntrinsics.cpp | 2 +-
.../builtin-object-size-range.ll | 93 ++++++++++++++++
6 files changed, 213 insertions(+), 11 deletions(-)
create mode 100644 llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
index e8f43140564509..5736674ae5dd52 100644
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -32,6 +32,7 @@ class AAResults;
class Argument;
class ConstantPointerNull;
class DataLayout;
+class DominatorTree;
class ExtractElementInst;
class ExtractValueInst;
class GEPOperator;
@@ -160,8 +161,10 @@ struct ObjectSizeOpts {
/// though they can't be evaluated. Otherwise, null is always considered to
/// point to a 0 byte region of memory.
bool NullIsUnknownSize = false;
- /// If set, used for more accurate evaluation
+ /// If set, used for more accurate evaluation.
AAResults *AA = nullptr;
+ /// If set, used for more accurate evaluation.
+ DominatorTree *DT = nullptr;
};
/// Compute the size of the object pointed by Ptr. Returns true and the
@@ -186,6 +189,12 @@ Value *lowerObjectSizeCall(
const TargetLibraryInfo *TLI, AAResults *AA, bool MustSucceed,
SmallVectorImpl<Instruction *> *InsertedInstructions = nullptr);
+Value *lowerObjectSizeCall(
+ IntrinsicInst *ObjectSize, const DataLayout &DL,
+ const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT,
+ bool MustSucceed,
+ SmallVectorImpl<Instruction *> *InsertedInstructions = nullptr);
+
/// SizeOffsetType - A base template class for the object size visitors. Used
/// here as a self-documenting way to handle the values rather than using a
/// \p std::pair.
diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
index 945081b77e9536..d444a768a65436 100644
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -723,12 +723,16 @@ class Value {
bool AllowInvariantGroup = false,
function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
nullptr) const;
- Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
- bool AllowNonInbounds,
- bool AllowInvariantGroup = false) {
+
+ Value *stripAndAccumulateConstantOffsets(
+ const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+ bool AllowInvariantGroup = false,
+ function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
+ nullptr) {
return const_cast<Value *>(
static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
- DL, Offset, AllowNonInbounds, AllowInvariantGroup));
+ DL, Offset, AllowNonInbounds, AllowInvariantGroup,
+ ExternalAnalysis));
}
/// This is a wrapper around stripAndAccumulateConstantOffsets with the
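
The new non-const overload follows the standard "delegate to the const
overload" idiom, same as the existing wrapper; a generic sketch outside of
LLVM:

  struct Buffer {
    // The const overload holds the real logic.
    const char *at(unsigned I) const { return Data + I; }
    // The non-const overload forwards and casts the result back.
    char *at(unsigned I) {
      return const_cast<char *>(static_cast<const Buffer *>(this)->at(I));
    }
    char Data[16];
  };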
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index f5dc1f29db2c4a..fdade37e763d50 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -25,6 +25,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
@@ -589,19 +590,28 @@ Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
const TargetLibraryInfo *TLI,
bool MustSucceed) {
return lowerObjectSizeCall(ObjectSize, DL, TLI, /*AAResults=*/nullptr,
- MustSucceed);
+ /*DT=*/nullptr, MustSucceed);
}
Value *llvm::lowerObjectSizeCall(
IntrinsicInst *ObjectSize, const DataLayout &DL,
const TargetLibraryInfo *TLI, AAResults *AA, bool MustSucceed,
SmallVectorImpl<Instruction *> *InsertedInstructions) {
+ return lowerObjectSizeCall(ObjectSize, DL, TLI, AA, /*DT=*/nullptr,
+ MustSucceed, InsertedInstructions);
+}
+
+Value *llvm::lowerObjectSizeCall(
+ IntrinsicInst *ObjectSize, const DataLayout &DL,
+ const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT,
+ bool MustSucceed, SmallVectorImpl<Instruction *> *InsertedInstructions) {
assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
"ObjectSize must be a call to llvm.objectsize!");
bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
ObjectSizeOpts EvalOptions;
EvalOptions.AA = AA;
+ EvalOptions.DT = DT;
// Unless we have to fold this to something, try to be as accurate as
// possible.
@@ -714,14 +724,54 @@ OffsetSpan ObjectSizeOffsetVisitor::computeImpl(Value *V) {
// readjust the APInt as we pass it upwards in order for the APInt to match
// the type the caller passed in.
APInt Offset(InitialIntTyBits, 0);
+
V = V->stripAndAccumulateConstantOffsets(
DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);
+ // Give it another try with an approximate analysis. We don't start with
+ // this one because stripAndAccumulateConstantOffsets behaves differently
+ // wrt. overflows if we provide an external analysis.
+ if (isa<GEPOperator>(V)) {
+ // External Analysis used to compute the Min/Max value of individual Offsets
+ // within a GEP.
+ auto OffsetRangeAnalysis = [this, V](Value &VOffset, APInt &Offset) {
+ if (auto *C = dyn_cast<ConstantInt>(&VOffset)) {
+ Offset = C->getValue();
+ return true;
+ }
+ if (Options.EvalMode != ObjectSizeOpts::Mode::Min &&
+ Options.EvalMode != ObjectSizeOpts::Mode::Max) {
+ return false;
+ }
+ ConstantRange CR = computeConstantRange(
+ &VOffset, /*ForSigned*/ true, /*UseInstrInfo*/ true, /*AC=*/nullptr,
+ /*CtxtI=*/dyn_cast<Instruction>(V), /*DT=*/Options.DT);
+ if (CR.isFullSet())
+ return false;
+
+ if (Options.EvalMode == ObjectSizeOpts::Mode::Min) {
+ Offset = CR.getSignedMax();
+ // Upper bound actually unknown.
+ if (Offset.isMaxSignedValue())
+ return false;
+ } else {
+ Offset = CR.getSignedMin();
+ // Lower bound actually unknown.
+ if (Offset.isMinSignedValue())
+ return false;
+ }
+ return true;
+ };
+
+ V = V->stripAndAccumulateConstantOffsets(
+ DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true,
+ /*ExternalAnalysis=*/OffsetRangeAnalysis);
+ }
+
// Later we use the index type size and zero but it will match the type of the
// value that is passed to computeImpl.
IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
Zero = APInt::getZero(IntTyBits);
-
OffsetSpan ORT = computeValue(V);
bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
@@ -801,6 +851,26 @@ OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
return Overflow ? ObjectSizeOffsetVisitor::unknown()
: OffsetSpan(Zero, alignAndCap(Size, I.getAlign()));
+ } else {
+ ConstantRange CR =
+ computeConstantRange(ArraySize, /*ForSigned*/ false,
+ /*UseInstrInfo*/ true, /*AC=*/nullptr,
+ /*CtxtI=*/&I, /*DT=*/Options.DT);
+ if (CR.isFullSet())
+ return ObjectSizeOffsetVisitor::unknown();
+ APInt Bound;
+ if (Options.EvalMode == ObjectSizeOpts::Mode::Max) {
+ Bound = CR.getUnsignedMax();
+ // Upper bound actually unknown.
+ if (Bound.isMaxValue())
+ return ObjectSizeOffsetVisitor::unknown();
+ } else {
+ Bound = CR.getUnsignedMin();
+ // Lower bound actually unknown.
+ if (Bound.isMinValue())
+ return ObjectSizeOffsetVisitor::unknown();
+ }
+ return OffsetSpan(Zero, alignAndCap(Bound, I.getAlign()));
}
return ObjectSizeOffsetVisitor::unknown();
}
@@ -818,7 +888,32 @@ OffsetSpan ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
}
OffsetSpan ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
- if (std::optional<APInt> Size = getAllocSize(&CB, TLI)) {
+ auto Mapper = [&CB, this](const Value *V) -> const Value * {
+ if (!V->getType()->isIntegerTy())
+ return V;
+ if (isa<ConstantInt>(V))
+ return V;
+ ConstantRange CR = computeConstantRange(
+ V, /*ForSigned*/ false, /*UseInstrInfo*/ true, /*AC=*/nullptr,
+ /*CtxtI=*/&CB, /*DT=*/Options.DT);
+ if (CR.isFullSet())
+ return V;
+
+ APInt Bound;
+ if (Options.EvalMode == ObjectSizeOpts::Mode::Max) {
+ Bound = CR.getUnsignedMax();
+ // Upper bound actually unknown.
+ if (Bound.isMaxValue())
+ return V;
+ } else {
+ Bound = CR.getUnsignedMin();
+ // Lower bound actually unknown.
+ if (Bound.isMinValue())
+ return V;
+ }
+ return ConstantInt::get(V->getType(), Bound);
+ };
+ if (std::optional<APInt> Size = getAllocSize(&CB, TLI, Mapper)) {
+ // Very large unsigned values cannot be represented as an OffsetSpan.
if (Size->isNegative())
return ObjectSizeOffsetVisitor::unknown();
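
One subtlety in the GEP handling above is the inversion: the minimum object
size is obtained from the largest possible offset, and the maximum from the
smallest. A worked instance, mirroring select_gep_offset from the new test
below:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t Alloc = 10;             // alloca i8, i64 10
    const uint64_t OffMin = 3, OffMax = 4; // select i1 %c, i64 3, i64 4
    // Largest remaining size comes from the smallest offset, and vice versa.
    assert(Alloc - OffMin == 7); // objectsize in max mode
    assert(Alloc - OffMax == 6); // objectsize in min mode
  }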
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 563b45de49836f..16e1f4aaa16718 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -3318,8 +3318,9 @@ Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
if (II->getIntrinsicID() == Intrinsic::objectsize) {
SmallVector<Instruction *> InsertedInstructions;
- Value *Result = lowerObjectSizeCall(
- II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
+ Value *Result =
+ lowerObjectSizeCall(II, DL, &TLI, AA, &DT,
+ /*MustSucceed=*/true, &InsertedInstructions);
for (Instruction *Inserted : InsertedInstructions)
Worklist.add(Inserted);
replaceInstUsesWith(*I, Result);
diff --git a/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp
index dcbec927e55703..1b164042502343 100644
--- a/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerConstantIntrinsics.cpp
@@ -143,7 +143,7 @@ bool llvm::lowerConstantIntrinsics(Function &F, const TargetLibraryInfo &TLI,
IsConstantIntrinsicsHandled++;
break;
case Intrinsic::objectsize:
- NewValue = lowerObjectSizeCall(II, DL, &TLI, true);
+ NewValue = lowerObjectSizeCall(II, DL, &TLI, /*AA=*/nullptr, DT, true);
LLVM_DEBUG(dbgs() << "Folding " << *II << " to " << *NewValue << "\n");
ObjectSizeIntrinsicsHandled++;
break;
diff --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
new file mode 100644
index 00000000000000..29632c617974ac
--- /dev/null
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-range.ll
@@ -0,0 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=lower-constant-intrinsics -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg)
+declare noalias ptr @malloc(i64 noundef) #0
+
+define i64 @select_alloc_size(i1 %cond) {
+; CHECK-LABEL: @select_alloc_size(
+; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 [[SIZE]], align 1
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %size = select i1 %cond, i64 3, i64 4
+ %ptr = alloca i8, i64 %size
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_malloc_size(i1 %cond) {
+; CHECK-LABEL: @select_malloc_size(
+; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
+; CHECK-NEXT: [[PTR:%.*]] = call noalias ptr @malloc(i64 noundef [[SIZE]])
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %size = select i1 %cond, i64 3, i64 4
+ %ptr = call noalias ptr @malloc(i64 noundef %size)
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_gep_offset(i1 %cond) {
+; CHECK-LABEL: @select_gep_offset(
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
+; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[OFFSET]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 7, i64 6
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %ptr = alloca i8, i64 10
+ %offset = select i1 %cond, i64 3, i64 4
+ %ptr.slide = getelementptr inbounds i8, ptr %ptr, i64 %offset
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_gep_neg_offset(i1 %cond) {
+; CHECK-LABEL: @select_gep_neg_offset(
+; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
+; CHECK-NEXT: [[PTR_SLIDE_1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 5
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 -3, i64 -4
+; CHECK-NEXT: [[PTR_SLIDE_2:%.*]] = getelementptr inbounds i8, ptr [[PTR_SLIDE_1]], i64 [[OFFSET]]
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 9, i64 8
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %ptr = alloca i8, i64 10
+ %ptr.slide.1 = getelementptr inbounds i8, ptr %ptr, i64 5
+ %offset = select i1 %cond, i64 -3, i64 -4
+ %ptr.slide.2 = getelementptr inbounds i8, ptr %ptr.slide.1, i64 %offset
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+define i64 @select_gep_offsets(i1 %cond) {
+; CHECK-LABEL: @select_gep_offsets(
+; CHECK-NEXT: [[PTR:%.*]] = alloca [10 x i8], i64 2, align 1
+; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i32 0, i32 1
+; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds [10 x i8], ptr [[PTR]], i32 [[OFFSET]], i32 5
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 15, i64 5
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %ptr = alloca [10 x i8], i64 2
+ %offset = select i1 %cond, i32 0, i32 1
+ %ptr.slide = getelementptr inbounds [10 x i8], ptr %ptr, i32 %offset, i32 5
+ %objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
+ %objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
+ %res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
+ ret i64 %res
+}
+
+attributes #0 = { nounwind allocsize(0) }
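
At the C/C++ source level, the new tests correspond to patterns like the
following sketch; with this patch (and optimizations enabled) both calls
should now fold to constants:

  #include <cstdlib>

  std::size_t sizes(bool Cond) {
    char *P = static_cast<char *>(std::malloc(Cond ? 3 : 4));
    // Type 0 requests an upper bound, type 2 a lower bound.
    std::size_t Max = __builtin_object_size(P, 0); // expected: 4
    std::size_t Min = __builtin_object_size(P, 2); // expected: 3
    std::free(P);
    return Max + Min;
  }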