[llvm] 8762f4c - [InstCombine] Track inserted instructions when lowering objectsize

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Fri Jun 23 06:36:32 PDT 2023


Author: Nikita Popov
Date: 2023-06-23T15:36:23+02:00
New Revision: 8762f4c7489cf4711bdffb34fad66d2b9324f066

URL: https://github.com/llvm/llvm-project/commit/8762f4c7489cf4711bdffb34fad66d2b9324f066
DIFF: https://github.com/llvm/llvm-project/commit/8762f4c7489cf4711bdffb34fad66d2b9324f066.diff

LOG: [InstCombine] Track inserted instructions when lowering objectsize

The inserted instructions can usually be simplified. Make sure this
happens in the same InstCombine iteration by adding them to the
worklist.
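
For reference, the tracking itself is just IRBuilder's pluggable inserter: an
IRBuilderCallbackInserter records every instruction the builder creates, and
the caller re-adds them to the worklist. A minimal sketch of the pattern
(emitTrackedCheck and its parameters are illustrative names, not part of this
patch; the real code is in lowerObjectSizeCall below):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/TargetFolder.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // Emit `Size != -1` before InsertPt while recording every instruction the
    // builder actually creates into Out. TargetFolder may constant-fold the
    // compare, in which case nothing is recorded.
    static Value *emitTrackedCheck(Instruction *InsertPt, Value *Size,
                                   const DataLayout &DL,
                                   SmallVectorImpl<Instruction *> &Out) {
      IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
          InsertPt->getContext(), TargetFolder(DL),
          IRBuilderCallbackInserter([&](Instruction *I) { Out.push_back(I); }));
      Builder.SetInsertPoint(InsertPt);
      return Builder.CreateICmpNE(
          Size, Constant::getAllOnesValue(Size->getType()));
    }

The caller then walks Out and re-adds each recorded instruction, as the
InstCombineCalls.cpp hunk below does with Worklist.add().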

We happen to get some better optimization in two cases, but this is
just a lucky accident. https://github.com/llvm/llvm-project/issues/63472
tracks implementing a fold for those cases.

This doesn't track all inserted instructions yet; for that, we would
also have to include those created by ObjectSizeOffsetEvaluator.
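
Until then, callers only see the instructions created directly by
lowerObjectSizeCall. The new out-parameter defaults to nullptr, so existing
callers are unchanged; a caller-side usage sketch of the updated signature
(lowerAndCollect and simplifyLater are illustrative names, not part of the
patch):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/IR/IntrinsicInst.h"
    using namespace llvm;

    static Value *lowerAndCollect(IntrinsicInst *ObjSize, const DataLayout &DL,
                                  const TargetLibraryInfo *TLI, AAResults *AA,
                                  function_ref<void(Instruction *)> simplifyLater) {
      SmallVector<Instruction *> Inserted;
      Value *V = lowerObjectSizeCall(ObjSize, DL, TLI, AA,
                                     /*MustSucceed=*/false, &Inserted);
      if (!V)
        return nullptr; // Size could not be determined.
      // Hand every newly created instruction back to the caller; InstCombine
      // adds them to its worklist so they are simplified in the same iteration.
      for (Instruction *I : Inserted)
        simplifyLater(I);
      return V;
    }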

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/MemoryBuiltins.h
    llvm/lib/Analysis/MemoryBuiltins.cpp
    llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
    llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
    llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 146781515abaa..711bbf6a0afe5 100644
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -182,9 +182,10 @@ bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
 /// argument of the call to objectsize.
 Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
                            const TargetLibraryInfo *TLI, bool MustSucceed);
-Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
-                           const TargetLibraryInfo *TLI, AAResults *AA,
-                           bool MustSucceed);
+Value *lowerObjectSizeCall(
+    IntrinsicInst *ObjectSize, const DataLayout &DL,
+    const TargetLibraryInfo *TLI, AAResults *AA, bool MustSucceed,
+    SmallVectorImpl<Instruction *> *InsertedInstructions = nullptr);
 
 using SizeOffsetType = std::pair<APInt, APInt>;
 

diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 6ff7aed0288a4..6b2e4425ade0b 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -602,10 +602,10 @@ Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                              MustSucceed);
 }
 
-Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
-                                 const DataLayout &DL,
-                                 const TargetLibraryInfo *TLI, AAResults *AA,
-                                 bool MustSucceed) {
+Value *llvm::lowerObjectSizeCall(
+    IntrinsicInst *ObjectSize, const DataLayout &DL,
+    const TargetLibraryInfo *TLI, AAResults *AA, bool MustSucceed,
+    SmallVectorImpl<Instruction *> *InsertedInstructions) {
   assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
          "ObjectSize must be a call to llvm.objectsize!");
 
@@ -640,7 +640,11 @@ Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
         Eval.compute(ObjectSize->getArgOperand(0));
 
     if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
-      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
+      IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
+          Ctx, TargetFolder(DL), IRBuilderCallbackInserter([&](Instruction *I) {
+            if (InsertedInstructions)
+              InsertedInstructions->push_back(I);
+          }));
       Builder.SetInsertPoint(ObjectSize);
 
      // If we're outside the end of the object, then we can always access

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index ea35e7934f016..7aa4855f22316 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1488,10 +1488,16 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
 
   Intrinsic::ID IID = II->getIntrinsicID();
   switch (IID) {
-  case Intrinsic::objectsize:
-    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false))
+  case Intrinsic::objectsize: {
+    SmallVector<Instruction *> InsertedInstructions;
+    if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false,
+                                       &InsertedInstructions)) {
+      for (Instruction *Inserted : InsertedInstructions)
+        Worklist.add(Inserted);
       return replaceInstUsesWith(CI, V);
+    }
     return nullptr;
+  }
   case Intrinsic::abs: {
     Value *IIOperand = II->getArgOperand(0);
     bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();

diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index ad44cfdf358b5..0fde841a0e973 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2397,8 +2397,11 @@ Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
 
       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
         if (II->getIntrinsicID() == Intrinsic::objectsize) {
-          Value *Result =
-              lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/true);
+          SmallVector<Instruction *> InsertedInstructions;
+          Value *Result = lowerObjectSizeCall(
+              II, DL, &TLI, AA, /*MustSucceed=*/true, &InsertedInstructions);
+          for (Instruction *Inserted : InsertedInstructions)
+            Worklist.add(Inserted);
           replaceInstUsesWith(*I, Result);
           eraseInstFromFunction(*I);
           Users[i] = nullptr; // Skip examining in the next loop.

diff --git a/llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll b/llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll
index 2e19260b2ced9..e5a1b834c3b4f 100644
--- a/llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll
+++ b/llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
-; RUN: opt -passes=instcombine -S < %s | FileCheck %s
+; RUN: opt -passes=instcombine -instcombine-infinite-loop-threshold=2 -S < %s | FileCheck %s
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128-p7:32:32"
 target triple = "x86_64-apple-macosx10.14.0"
@@ -212,13 +212,8 @@ define i64 @as_cast(i1 %c) {
 ; CHECK-LABEL: define i64 @as_cast
 ; CHECK-SAME: (i1 [[C:%.*]]) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = select i1 [[C]], i64 64, i64 1
-; CHECK-NEXT:    [[NOT_C:%.*]] = xor i1 [[C]], true
-; CHECK-NEXT:    [[DOTNEG:%.*]] = sext i1 [[NOT_C]] to i64
-; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i64 [[TMP0]], [[DOTNEG]]
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], -1
-; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = select i1 [[C]], i64 64, i64 0
+; CHECK-NEXT:    ret i64 [[TMP0]]
 ;
 entry:
   %p0 = tail call ptr @malloc(i64 64)
@@ -233,13 +228,8 @@ define i64 @constexpr_as_cast(i1 %c) {
 ; CHECK-LABEL: define i64 @constexpr_as_cast
 ; CHECK-SAME: (i1 [[C:%.*]]) {
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = select i1 [[C]], i64 64, i64 1
-; CHECK-NEXT:    [[NOT_C:%.*]] = xor i1 [[C]], true
-; CHECK-NEXT:    [[DOTNEG:%.*]] = sext i1 [[NOT_C]] to i64
-; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i64 [[TMP0]], [[DOTNEG]]
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[TMP1]], -1
-; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP0:%.*]] = select i1 [[C]], i64 64, i64 0
+; CHECK-NEXT:    ret i64 [[TMP0]]
 ;
 entry:
   %p0 = tail call ptr @malloc(i64 64)

