[llvm] [InstCombine] Don't consider aligned_alloc removable if icmp uses result (PR #69474)

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 19 08:43:57 PDT 2023


https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/69474

>From 7456fc08655ff1c8f40118f8bdd333e07c4e2cc5 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 18 Oct 2023 16:12:35 +0100
Subject: [PATCH 1/2] [InstCombine] Don't consider aligned_alloc removable if
 icmp uses result

At the moment, all alloc-like functions are assumed to return non-null
pointers, if their return value is only used in a compare. This is
based on being allowed to substitute the allocation function with one
that doesn't fail to allocate the required memory.

aligned_alloc however must also return null if the required alignment
cannot be satisfied, so I don't think the same reasoning as above can be
applied to it.

This patch adds a bail-out for aligned_alloc calls to
isAllocSiteRemovable.
---
 llvm/include/llvm/Analysis/MemoryBuiltins.h   |  5 ++++
 llvm/lib/Analysis/MemoryBuiltins.cpp          |  2 +-
 .../InstCombine/InstructionCombining.cpp      | 24 +++++++++++++++++++
 .../Transforms/InstCombine/malloc-free.ll     | 16 +++++++++----
 4 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
index 827b5081b2ce755..cf18d9fcd95869d 100644
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -131,6 +131,11 @@ Constant *getInitialValueOfAllocation(const Value *V,
 std::optional<StringRef> getAllocationFamily(const Value *I,
                                              const TargetLibraryInfo *TLI);
 
+/// If \p V is a call to a function that is part of an allocation family
+/// (e.g. malloc/realloc/calloc/free), return the allocation function kind of
+/// the called function.
+AllocFnKind getAllocFnKind(const Value *V);
+
 //===----------------------------------------------------------------------===//
 //  Utility functions to compute size of objects.
 //
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 6ee1984f908b8a1..e52572dbbe5461a 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -272,7 +272,7 @@ getAllocationSize(const Value *V, const TargetLibraryInfo *TLI) {
   return Result;
 }
 
-static AllocFnKind getAllocFnKind(const Value *V) {
+AllocFnKind llvm::getAllocFnKind(const Value *V) {
   if (const auto *CB = dyn_cast<CallBase>(V)) {
     Attribute Attr = CB->getFnAttr(Attribute::AllocKind);
     if (Attr.isValid())
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 8a6f66e36bd80e9..1b5c2e8abf62b97 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2430,6 +2430,30 @@ static bool isAllocSiteRemovable(Instruction *AI,
         unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
         if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
           return false;
+        // Do not fold compares to aligned_alloc calls, as they may have to
+        // return null in case the required alignment cannot be satisfied,
+        // unless we can prove that both alignment and size are valid.
+        if ((getAllocFnKind(AI) & AllocFnKind::Aligned) !=
+            AllocFnKind::Unknown) {
+          auto *CB = cast<CallBase>(AI);
+          Function &F = *CB->getCalledFunction();
+          LibFunc TheLibFunc;
+
+          // Check if alignment and size of a call to aligned_alloc is valid,
+          // that is alignment is a power-of-2 and the size is a multiple of the
+          // alignment.
+          auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
+            const APInt *Alignment;
+            const APInt *Size;
+            return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
+                   match(CB->getArgOperand(1), m_APInt(Size)) &&
+                   Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
+          };
+          if (TLI.getLibFunc(F, TheLibFunc) && TLI.has(TheLibFunc) &&
+              TheLibFunc == LibFunc_aligned_alloc &&
+              !AlignmentAndSizeKnownValid(CB))
+            return false;
+        }
         Users.emplace_back(I);
         continue;
       }
diff --git a/llvm/test/Transforms/InstCombine/malloc-free.ll b/llvm/test/Transforms/InstCombine/malloc-free.ll
index b77f70f239921e8..10725950a1c7373 100644
--- a/llvm/test/Transforms/InstCombine/malloc-free.ll
+++ b/llvm/test/Transforms/InstCombine/malloc-free.ll
@@ -26,9 +26,11 @@ define i32 @dead_aligned_alloc(i32 %size, i32 %alignment, i8 %value) {
   ret i32 0
 }
 
-define i1 @aligned_alloc_pointer_only_used_by_cmp(i32 %size, i32 %alignment, i8 %value) {
-; CHECK-LABEL: @aligned_alloc_pointer_only_used_by_cmp(
-; CHECK-NEXT:    ret i1 true
+define i1 @aligned_alloc_only_pointer(i32 %size, i32 %alignment, i8 %value) {
+; CHECK-LABEL: @aligned_alloc_only_pointer(
+; CHECK-NEXT:    [[ALIGNED_ALLOCATION:%.*]] = tail call ptr @aligned_alloc(i32 [[ALIGNMENT:%.*]], i32 [[SIZE:%.*]])
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[ALIGNED_ALLOCATION]], null
+; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %aligned_allocation = tail call ptr @aligned_alloc(i32 %alignment, i32 %size)
   %cmp = icmp ne ptr %aligned_allocation, null
@@ -46,7 +48,9 @@ define i1 @aligned_alloc_pointer_only_used_by_cmp_alignment_and_value_known_ok(i
 
 define i1 @aligned_alloc_pointer_only_used_by_cmp_alignment_no_power_of_2(i32 %size, i32 %alignment, i8 %value) {
 ; CHECK-LABEL: @aligned_alloc_pointer_only_used_by_cmp_alignment_no_power_of_2(
-; CHECK-NEXT:    ret i1 true
+; CHECK-NEXT:    [[ALIGNED_ALLOCATION:%.*]] = tail call dereferenceable_or_null(32) ptr @aligned_alloc(i32 3, i32 32)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[ALIGNED_ALLOCATION]], null
+; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %aligned_allocation = tail call ptr @aligned_alloc(i32 3, i32 32)
   %cmp = icmp ne ptr %aligned_allocation, null
@@ -55,7 +59,9 @@ define i1 @aligned_alloc_pointer_only_used_by_cmp_alignment_no_power_of_2(i32 %s
 
 define i1 @aligned_alloc_pointer_only_used_by_cmp_size_not_multiple_of_alignment(i32 %size, i32 %alignment, i8 %value) {
 ; CHECK-LABEL: @aligned_alloc_pointer_only_used_by_cmp_size_not_multiple_of_alignment(
-; CHECK-NEXT:    ret i1 true
+; CHECK-NEXT:    [[ALIGNED_ALLOCATION:%.*]] = tail call dereferenceable_or_null(31) ptr @aligned_alloc(i32 8, i32 31)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[ALIGNED_ALLOCATION]], null
+; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %aligned_allocation = tail call ptr @aligned_alloc(i32 8, i32 31)
   %cmp = icmp ne ptr %aligned_allocation, null

>From 3b6f753ad9316692015d54fe5a12ef38021982a9 Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Thu, 19 Oct 2023 16:42:16 +0100
Subject: [PATCH 2/2] [InstCombine] Only check TLI.

---
 .../InstCombine/InstructionCombining.cpp      | 33 +++++++++----------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 1b5c2e8abf62b97..4ae50bd8354b4e9 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2430,30 +2430,27 @@ static bool isAllocSiteRemovable(Instruction *AI,
         unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
         if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
           return false;
+
         // Do not fold compares to aligned_alloc calls, as they may have to
         // return null in case the required alignment cannot be satisfied,
         // unless we can prove that both alignment and size are valid.
-        if ((getAllocFnKind(AI) & AllocFnKind::Aligned) !=
-            AllocFnKind::Unknown) {
-          auto *CB = cast<CallBase>(AI);
-          Function &F = *CB->getCalledFunction();
-          LibFunc TheLibFunc;
-
+        auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
           // Check if alignment and size of a call to aligned_alloc is valid,
           // that is alignment is a power-of-2 and the size is a multiple of the
           // alignment.
-          auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
-            const APInt *Alignment;
-            const APInt *Size;
-            return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
-                   match(CB->getArgOperand(1), m_APInt(Size)) &&
-                   Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
-          };
-          if (TLI.getLibFunc(F, TheLibFunc) && TLI.has(TheLibFunc) &&
-              TheLibFunc == LibFunc_aligned_alloc &&
-              !AlignmentAndSizeKnownValid(CB))
-            return false;
-        }
+          const APInt *Alignment;
+          const APInt *Size;
+          return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
+                 match(CB->getArgOperand(1), m_APInt(Size)) &&
+                 Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
+        };
+        auto *CB = cast<CallBase>(AI);
+        Function &F = *CB->getCalledFunction();
+        LibFunc TheLibFunc;
+        if (TLI.getLibFunc(F, TheLibFunc) && TLI.has(TheLibFunc) &&
+            TheLibFunc == LibFunc_aligned_alloc &&
+            !AlignmentAndSizeKnownValid(CB))
+          return false;
         Users.emplace_back(I);
         continue;
       }



More information about the llvm-commits mailing list