[llvm] aa15ea4 - [builtin_object_size] Basic support for posix_memalign

via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 8 00:32:03 PDT 2022


Author: serge-sans-paille
Date: 2022-04-08T09:31:11+02:00
New Revision: aa15ea47e20fbce5814f4a0d69521f8270b70e69

URL: https://github.com/llvm/llvm-project/commit/aa15ea47e20fbce5814f4a0d69521f8270b70e69
DIFF: https://github.com/llvm/llvm-project/commit/aa15ea47e20fbce5814f4a0d69521f8270b70e69.diff

LOG: [builtin_object_size] Basic support for posix_memalign

It actually implements support for seeing through loads, using alias analysis to
refine the result.

This is rather limited, but I didn't want to rely on more than available
analysis at that point (to be gentle with compilation time), and it does seem to
catch common scenarios, as showcased by the included tests.

Differential Revision: https://reviews.llvm.org/D122431

Added: 
    llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-load.ll
    llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-posix-memalign.ll

Modified: 
    llvm/include/llvm/Analysis/MemoryBuiltins.h
    llvm/lib/Analysis/MemoryBuiltins.cpp
    llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
    llvm/lib/Transforms/InstCombine/InstructionCombining.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
index f8fba19620537..814543e3c409b 100644
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -28,6 +28,7 @@
 namespace llvm {
 
 class AllocaInst;
+class AAResults;
 class Argument;
 class CallInst;
 class ConstantPointerNull;
@@ -152,6 +153,8 @@ struct ObjectSizeOpts {
   /// though they can't be evaluated. Otherwise, null is always considered to
   /// point to a 0 byte region of memory.
   bool NullIsUnknownSize = false;
+  /// If set, used for more accurate evaluation
+  AAResults *AA = nullptr;
 };
 
 /// Compute the size of the object pointed by Ptr. Returns true and the
@@ -171,8 +174,9 @@ bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
 /// argument of the call to objectsize.
 Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
                            const TargetLibraryInfo *TLI, bool MustSucceed);
-
-
+Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
+                           const TargetLibraryInfo *TLI, AAResults *AA,
+                           bool MustSucceed);
 
 using SizeOffsetType = std::pair<APInt, APInt>;
 
@@ -229,6 +233,10 @@ class ObjectSizeOffsetVisitor
   SizeOffsetType visitInstruction(Instruction &I);
 
 private:
+  SizeOffsetType findLoadSizeOffset(
+      LoadInst &LoadFrom, BasicBlock &BB, BasicBlock::iterator From,
+      SmallDenseMap<BasicBlock *, SizeOffsetType, 8> &VisitedBlocks,
+      unsigned &ScannedInstCount);
   SizeOffsetType combineSizeOffset(SizeOffsetType LHS, SizeOffsetType RHS);
   SizeOffsetType computeImpl(Value *V);
   bool CheckedZextOrTrunc(APInt &I);

diff  --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 4db57957453b5..aebe82f71dc43 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -17,6 +17,7 @@
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/TargetFolder.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/Utils/Local.h"
@@ -153,7 +154,6 @@ static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
     {LibFunc_strndup,                           {StrDupLike,       2,  1, -1, -1, MallocFamily::Malloc}},
     {LibFunc_dunder_strndup,                    {StrDupLike,       2,  1, -1, -1, MallocFamily::Malloc}},
     {LibFunc___kmpc_alloc_shared,               {MallocLike,       1,  0, -1, -1, MallocFamily::KmpcAllocShared}},
-    // TODO: Handle "int posix_memalign(void **, size_t, size_t)"
 };
 // clang-format on
 
@@ -569,11 +569,21 @@ Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI,
                                  bool MustSucceed) {
+  return lowerObjectSizeCall(ObjectSize, DL, TLI, /*AAResults=*/nullptr,
+                             MustSucceed);
+}
+
+Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
+                                 const DataLayout &DL,
+                                 const TargetLibraryInfo *TLI, AAResults *AA,
+                                 bool MustSucceed) {
   assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
          "ObjectSize must be a call to llvm.objectsize!");
 
   bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
   ObjectSizeOpts EvalOptions;
+  EvalOptions.AA = AA;
+
   // Unless we have to fold this to something, try to be as accurate as
   // possible.
   if (MustSucceed)
@@ -803,9 +813,130 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst&) {
   return unknown();
 }
 
-SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst&) {
-  ++ObjectVisitorLoad;
-  return unknown();
+SizeOffsetType ObjectSizeOffsetVisitor::findLoadSizeOffset(
+    LoadInst &Load, BasicBlock &BB, BasicBlock::iterator From,
+    SmallDenseMap<BasicBlock *, SizeOffsetType, 8> &VisitedBlocks,
+    unsigned &ScannedInstCount) {
+  constexpr unsigned MaxInstsToScan = 128;
+
+  auto Where = VisitedBlocks.find(&BB);
+  if (Where != VisitedBlocks.end())
+    return Where->second;
+
+  auto Unknown = [this, &BB, &VisitedBlocks]() {
+    return VisitedBlocks[&BB] = unknown();
+  };
+  auto Known = [this, &BB, &VisitedBlocks](SizeOffsetType SO) {
+    return VisitedBlocks[&BB] = SO;
+  };
+
+  do {
+    Instruction &I = *From;
+
+    if (I.isDebugOrPseudoInst())
+      continue;
+
+    if (++ScannedInstCount > MaxInstsToScan)
+      return Unknown();
+
+    if (!I.mayWriteToMemory())
+      continue;
+
+    if (auto *SI = dyn_cast<StoreInst>(&I)) {
+      AliasResult AR =
+          Options.AA->alias(SI->getPointerOperand(), Load.getPointerOperand());
+      switch ((AliasResult::Kind)AR) {
+      case AliasResult::NoAlias:
+        continue;
+      case AliasResult::MustAlias:
+        if (SI->getValueOperand()->getType()->isPointerTy())
+          return Known(compute(SI->getValueOperand()));
+        else
+          return Unknown(); // No handling of non-pointer values by `compute`.
+      default:
+        return Unknown();
+      }
+    }
+
+    if (auto *CB = dyn_cast<CallBase>(&I)) {
+      Function *Callee = CB->getCalledFunction();
+      // Bail out on indirect call.
+      if (!Callee)
+        return Unknown();
+
+      LibFunc TLIFn;
+      if (!TLI || !TLI->getLibFunc(*CB->getCalledFunction(), TLIFn) ||
+          !TLI->has(TLIFn))
+        return Unknown();
+
+      // TODO: There's probably more interesting case to support here.
+      if (TLIFn != LibFunc_posix_memalign)
+        return Unknown();
+
+      AliasResult AR =
+          Options.AA->alias(CB->getOperand(0), Load.getPointerOperand());
+      switch ((AliasResult::Kind)AR) {
+      case AliasResult::NoAlias:
+        continue;
+      case AliasResult::MustAlias:
+        break;
+      default:
+        return Unknown();
+      }
+
+      // Is the error status of posix_memalign correctly checked? If not it
+      // would be incorrect to assume it succeeds and load doesn't see the
+      // previous value.
+      Optional<bool> Checked = isImpliedByDomCondition(
+          ICmpInst::ICMP_EQ, CB, ConstantInt::get(CB->getType(), 0), &Load, DL);
+      if (!Checked || !*Checked)
+        return Unknown();
+
+      Value *Size = CB->getOperand(2);
+      auto *C = dyn_cast<ConstantInt>(Size);
+      if (!C)
+        return Unknown();
+
+      return Known({C->getValue(), APInt(C->getValue().getBitWidth(), 0)});
+    }
+
+    return Unknown();
+  } while (From-- != BB.begin());
+
+  SmallVector<SizeOffsetType> PredecessorSizeOffsets;
+  for (auto *PredBB : predecessors(&BB)) {
+    PredecessorSizeOffsets.push_back(findLoadSizeOffset(
+        Load, *PredBB, BasicBlock::iterator(PredBB->getTerminator()),
+        VisitedBlocks, ScannedInstCount));
+    if (!bothKnown(PredecessorSizeOffsets.back()))
+      return Unknown();
+  }
+
+  if (PredecessorSizeOffsets.empty())
+    return Unknown();
+
+  return Known(std::accumulate(PredecessorSizeOffsets.begin() + 1,
+                               PredecessorSizeOffsets.end(),
+                               PredecessorSizeOffsets.front(),
+                               [this](SizeOffsetType LHS, SizeOffsetType RHS) {
+                                 return combineSizeOffset(LHS, RHS);
+                               }));
+}
+
+SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &LI) {
+  if (!Options.AA) {
+    ++ObjectVisitorLoad;
+    return unknown();
+  }
+
+  SmallDenseMap<BasicBlock *, SizeOffsetType, 8> VisitedBlocks;
+  unsigned ScannedInstCount = 0;
+  SizeOffsetType SO =
+      findLoadSizeOffset(LI, *LI.getParent(), BasicBlock::iterator(LI),
+                         VisitedBlocks, ScannedInstCount);
+  if (!bothKnown(SO))
+    ++ObjectVisitorLoad;
+  return SO;
 }
 
 SizeOffsetType ObjectSizeOffsetVisitor::combineSizeOffset(SizeOffsetType LHS,
@@ -1010,7 +1141,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) {
   return unknown();
 }
 
-SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst&) {
+SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &LI) {
   return unknown();
 }
 

diff  --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 6163b40e15173..d9a8b3726fe2d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1188,7 +1188,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
   Intrinsic::ID IID = II->getIntrinsicID();
   switch (IID) {
   case Intrinsic::objectsize:
-    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/false))
+    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false))
       return replaceInstUsesWith(CI, V);
     return nullptr;
   case Intrinsic::abs: {

diff  --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 51ef4b11ddc8e..b9957a947392c 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2810,7 +2810,7 @@ Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
         if (II->getIntrinsicID() == Intrinsic::objectsize) {
           Value *Result =
-              lowerObjectSizeCall(II, DL, &TLI, /*MustSucceed=*/true);
+              lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/true);
           replaceInstUsesWith(*I, Result);
           eraseInstFromFunction(*I);
           Users[i] = nullptr; // Skip examining in the next loop.

diff  --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-load.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-load.ll
new file mode 100644
index 0000000000000..dc3738cdada3e
--- /dev/null
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-load.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt --instcombine -lower-constant-intrinsics -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg)
+
+
+define dso_local i64 @check_store_load(i1 %cond) local_unnamed_addr {
+; CHECK-LABEL: @check_store_load(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTR01:%.*]] = alloca [10 x i8], align 1
+; CHECK-NEXT:    br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[PTR01_SUB:%.*]] = getelementptr inbounds [10 x i8], [10 x i8]* [[PTR01]], i64 0, i64 0
+; CHECK-NEXT:    br label [[RETURN:%.*]]
+; CHECK:       if.end:
+; CHECK-NEXT:    [[PTR12:%.*]] = alloca [12 x i8], align 1
+; CHECK-NEXT:    [[PTR12_SUB:%.*]] = getelementptr inbounds [12 x i8], [12 x i8]* [[PTR12]], i64 0, i64 0
+; CHECK-NEXT:    br label [[RETURN]]
+; CHECK:       return:
+; CHECK-NEXT:    [[STOREMERGE:%.*]] = phi i8* [ [[PTR12_SUB]], [[IF_END]] ], [ [[PTR01_SUB]], [[IF_THEN]] ]
+; CHECK-NEXT:    ret i64 12
+;
+entry:
+  %holder = alloca i8*
+  %ptr0 = alloca i8, i64 10
+  br i1 %cond, label %if.then, label %if.end
+
+if.then:
+  store i8* %ptr0, i8** %holder
+  br label %return
+
+if.end:
+  %ptr1 = alloca i8, i64 12
+  store i8* %ptr1, i8** %holder
+  br label %return
+
+return:
+  %held = load i8*, i8** %holder
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %held, i1 false, i1 true, i1 false)
+  ret i64 %objsize
+
+}

diff  --git a/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-posix-memalign.ll b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-posix-memalign.ll
new file mode 100644
index 0000000000000..365c3b66558f5
--- /dev/null
+++ b/llvm/test/Transforms/LowerConstantIntrinsics/builtin-object-size-posix-memalign.ll
@@ -0,0 +1,219 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt --instcombine -lower-constant-intrinsics -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare dso_local i32 @posix_memalign(i8** noundef, i64 noundef, i64 noundef)
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg)
+
+; Check posix_memalign call with proper handling of return value
+define dso_local i64 @check_posix_memalign(i32 noundef %n) local_unnamed_addr {
+; CHECK-LABEL: @check_posix_memalign(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ 10, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, 0
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+
+}
+
+
+; Same test case as above but with idiomatic NULL initialization
+define dso_local i64 @check_posix_memalign_null() {
+; CHECK-LABEL: @check_posix_memalign_null(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    store i8* null, i8** [[OBJ]], align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ 10, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  store i8* null, i8** %obj
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, 0
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+}
+
+; Using argument storage instead of local storage for the allocated pointer.
+define dso_local i64 @check_posix_memalign_arg(i8** noalias noundef %obj) {
+; CHECK-LABEL: @check_posix_memalign_arg(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef [[OBJ:%.*]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ 10, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, 0
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+
+}
+
+; posix_memalign can fail, in that case no object_size can be guessed.
+define dso_local i64 @check_posix_memalign_unchecked() {
+; CHECK-LABEL: @check_posix_memalign_unchecked(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[VAL:%.*]] = load i8*, i8** [[OBJ]], align 8
+; CHECK-NEXT:    ret i64 -1
+;
+entry:
+  %obj = alloca i8*
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  ret i64 %objsize
+}
+
+; Checks that builtin_object_size upon posix_memalign failure behaves correctly
+define dso_local i64 @check_posix_memalign_inverted_cond() {
+; CHECK-LABEL: @check_posix_memalign_inverted_cond(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    store i8* null, i8** [[OBJ]], align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL]], label [[EXIT:%.*]], label [[COND_FALSE:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    [[VAL:%.*]] = load i8*, i8** [[OBJ]], align 8
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ -1, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  store i8* null, i8** %obj
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp eq i32 %call, 0
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+}
+
+; Check posix_memalign call with a runtime condition check
+define dso_local i64 @check_posix_memalign_runtime_cond(i32 noundef %n) local_unnamed_addr {
+; CHECK-LABEL: @check_posix_memalign_runtime_cond(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    [[VAL:%.*]] = load i8*, i8** [[OBJ]], align 8
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[ENTRY:%.*]] ], [ -1, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, %n
+  br i1 %tobool, label %exit, label %cond.false
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %entry ], [ %objsize, %cond.false ]
+  ret i64 %cond
+
+}
+
+; Check posix_memalign call with two different paths leading to the same alloc.
+define dso_local i64 @check_posix_memalign_diamond() local_unnamed_addr {
+; CHECK-LABEL: @check_posix_memalign_diamond(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[OBJ:%.*]] = alloca i8*, align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 @posix_memalign(i8** noundef nonnull [[OBJ]], i64 noundef 8, i64 noundef 10)
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[CALL]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[COND_FALSE:%.*]], label [[COND_TRUE:%.*]]
+; CHECK:       cond.true:
+; CHECK-NEXT:    br label [[EXIT:%.*]]
+; CHECK:       cond.false:
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[COND:%.*]] = phi i64 [ -2, [[COND_TRUE]] ], [ 10, [[COND_FALSE]] ]
+; CHECK-NEXT:    ret i64 [[COND]]
+;
+entry:
+  %obj = alloca i8*
+  %call = call i32 @posix_memalign(i8** noundef %obj, i64 noundef 8, i64 noundef 10)
+  %tobool = icmp ne i32 %call, 0
+  br i1 %tobool, label %cond.true, label %cond.false
+
+cond.true:
+  br label %exit
+
+cond.false:
+  %val = load i8*, i8** %obj
+  %objsize = call i64 @llvm.objectsize.i64.p0i8(i8* %val, i1 false, i1 true, i1 false)
+  br label %exit
+
+exit:
+  %cond = phi i64 [ -2, %cond.true ], [ %objsize, %cond.false ]
+  ret i64 %cond
+
+}


        


More information about the llvm-commits mailing list