[llvm] 0af5c06 - [InstCombine] Don't consider aligned_alloc removable if icmp uses result (#69474)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 19 10:35:30 PDT 2023
Author: Florian Hahn
Date: 2023-10-19T18:35:27+01:00
New Revision: 0af5c0668a1b93c7b3b34a1885b494c4ebb0b46f
URL: https://github.com/llvm/llvm-project/commit/0af5c0668a1b93c7b3b34a1885b494c4ebb0b46f
DIFF: https://github.com/llvm/llvm-project/commit/0af5c0668a1b93c7b3b34a1885b494c4ebb0b46f.diff
LOG: [InstCombine] Don't consider aligned_alloc removable if icmp uses result (#69474)
At the moment, all alloc-like functions are assumed to return non-null
pointers if their return value is only used in a compare. This is based on
being allowed to substitute the allocation function with one that does not
fail to allocate the required memory.

aligned_alloc, however, must also return null if the required alignment
cannot be satisfied, so I don't think the same reasoning can be applied to it.
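
To illustrate the difference, here is a minimal standalone C++ sketch (not
part of the patch; it uses std::aligned_alloc from C++17 and assumes the
platform fails by returning null for an unsupported alignment, as described
above):

#include <cstdio>
#include <cstdlib>

int main() {
  // malloc-like reasoning: a compare of the result against null may be
  // folded, because the allocator could be replaced by one that never fails.
  void *m = std::malloc(32);
  std::printf("malloc non-null: %d\n", m != nullptr);
  std::free(m);

  // aligned_alloc: alignment 3 is not a power of two, so the call may
  // legitimately return null regardless of how much memory is available,
  // and the compare below cannot simply be folded to true.
  void *p = std::aligned_alloc(3, 32);
  std::printf("aligned_alloc(3, 32) non-null: %d\n", p != nullptr);
  std::free(p); // free(nullptr) is a no-op
  return 0;
}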
This patch adds a bail-out to isAllocSiteRemovable for aligned_alloc calls.
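
For reference, the validity check the patch introduces boils down to the
following standalone sketch (plain integers instead of APInt; the helper name
is illustrative and not part of the patch):

#include <cstdint>

// True when constant alignment/size arguments to aligned_alloc are known to
// be valid: the alignment is a power of two and the size is a multiple of it.
// Only then is the call still treated as a removable, never-null allocation.
static bool alignmentAndSizeKnownValid(uint64_t Alignment, uint64_t Size) {
  bool IsPowerOfTwo = Alignment != 0 && (Alignment & (Alignment - 1)) == 0;
  return IsPowerOfTwo && Size % Alignment == 0;
}

// Matching the updated tests below:
//   alignmentAndSizeKnownValid(8, 32) -> true   (compare may still be folded)
//   alignmentAndSizeKnownValid(3, 32) -> false  (alignment not a power of 2)
//   alignmentAndSizeKnownValid(8, 31) -> false  (size not a multiple of 8)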
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
llvm/test/Transforms/InstCombine/malloc-free.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 8a6f66e36bd80e9..559eb2ef4795eb1 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2430,6 +2430,26 @@ static bool isAllocSiteRemovable(Instruction *AI,
unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
return false;
+
+ // Do not fold compares to aligned_alloc calls, as they may have to
+ // return null in case the required alignment cannot be satisfied,
+ // unless we can prove that both alignment and size are valid.
+ auto AlignmentAndSizeKnownValid = [](CallBase *CB) {
+ // Check if alignment and size of a call to aligned_alloc is valid,
+ // that is alignment is a power-of-2 and the size is a multiple of the
+ // alignment.
+ const APInt *Alignment;
+ const APInt *Size;
+ return match(CB->getArgOperand(0), m_APInt(Alignment)) &&
+ match(CB->getArgOperand(1), m_APInt(Size)) &&
+ Alignment->isPowerOf2() && Size->urem(*Alignment).isZero();
+ };
+ auto *CB = dyn_cast<CallBase>(AI);
+ LibFunc TheLibFunc;
+ if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
+ TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
+ !AlignmentAndSizeKnownValid(CB))
+ return false;
Users.emplace_back(I);
continue;
}
diff --git a/llvm/test/Transforms/InstCombine/malloc-free.ll b/llvm/test/Transforms/InstCombine/malloc-free.ll
index b77f70f239921e8..10725950a1c7373 100644
--- a/llvm/test/Transforms/InstCombine/malloc-free.ll
+++ b/llvm/test/Transforms/InstCombine/malloc-free.ll
@@ -26,9 +26,11 @@ define i32 @dead_aligned_alloc(i32 %size, i32 %alignment, i8 %value) {
ret i32 0
}
-define i1 @aligned_alloc_pointer_only_used_by_cmp(i32 %size, i32 %alignment, i8 %value) {
-; CHECK-LABEL: @aligned_alloc_pointer_only_used_by_cmp(
-; CHECK-NEXT: ret i1 true
+define i1 @aligned_alloc_only_pointe(i32 %size, i32 %alignment, i8 %value) {
+; CHECK-LABEL: @aligned_alloc_only_pointe(
+; CHECK-NEXT: [[ALIGNED_ALLOCATION:%.*]] = tail call ptr @aligned_alloc(i32 [[ALIGNMENT:%.*]], i32 [[SIZE:%.*]])
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[ALIGNED_ALLOCATION]], null
+; CHECK-NEXT: ret i1 [[CMP]]
;
%aligned_allocation = tail call ptr @aligned_alloc(i32 %alignment, i32 %size)
%cmp = icmp ne ptr %aligned_allocation, null
@@ -46,7 +48,9 @@ define i1 @aligned_alloc_pointer_only_used_by_cmp_alignment_and_value_known_ok(i
define i1 @aligned_alloc_pointer_only_used_by_cmp_alignment_no_power_of_2(i32 %size, i32 %alignment, i8 %value) {
; CHECK-LABEL: @aligned_alloc_pointer_only_used_by_cmp_alignment_no_power_of_2(
-; CHECK-NEXT: ret i1 true
+; CHECK-NEXT: [[ALIGNED_ALLOCATION:%.*]] = tail call dereferenceable_or_null(32) ptr @aligned_alloc(i32 3, i32 32)
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[ALIGNED_ALLOCATION]], null
+; CHECK-NEXT: ret i1 [[CMP]]
;
%aligned_allocation = tail call ptr @aligned_alloc(i32 3, i32 32)
%cmp = icmp ne ptr %aligned_allocation, null
@@ -55,7 +59,9 @@ define i1 @aligned_alloc_pointer_only_used_by_cmp_alignment_no_power_of_2(i32 %s
define i1 @aligned_alloc_pointer_only_used_by_cmp_size_not_multiple_of_alignment(i32 %size, i32 %alignment, i8 %value) {
; CHECK-LABEL: @aligned_alloc_pointer_only_used_by_cmp_size_not_multiple_of_alignment(
-; CHECK-NEXT: ret i1 true
+; CHECK-NEXT: [[ALIGNED_ALLOCATION:%.*]] = tail call dereferenceable_or_null(31) ptr @aligned_alloc(i32 8, i32 31)
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[ALIGNED_ALLOCATION]], null
+; CHECK-NEXT: ret i1 [[CMP]]
;
%aligned_allocation = tail call ptr @aligned_alloc(i32 8, i32 31)
%cmp = icmp ne ptr %aligned_allocation, null