[llvm] r267214 - Fold compares for distinct allocations

Sanjoy Das via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 22 13:52:25 PDT 2016


Author: sanjoy
Date: Fri Apr 22 15:52:25 2016
New Revision: 267214

URL: http://llvm.org/viewvc/llvm-project?rev=267214&view=rev
Log:
Fold compares for distinct allocations

Summary:
We can fold equality comparisons between two distinct allocations within a
function to false (and inequality comparisons to true).
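
A hypothetical C++ sketch (not part of this patch; the function name is made
up) of the source-level pattern this enables, assuming neither allocation
otherwise escapes:

    #include <cstdlib>

    bool mallocs_alias() {
      void *p = std::malloc(4);
      void *q = std::malloc(4);
      // Two distinct, non-escaping allocations can never be equal, so the
      // optimizer may now fold this comparison to false and remove both
      // malloc/free pairs.
      bool eq = (p == q);
      std::free(p);
      std::free(q);
      return eq;
    }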

Patch by Anna Thomas!

Reviewers: majnemer, reames, sanjoy

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D19390

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
    llvm/trunk/test/Transforms/InstCombine/compare-unescaped.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp?rev=267214&r1=267213&r2=267214&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp Fri Apr 22 15:52:25 2016
@@ -1862,12 +1862,18 @@ Instruction *InstCombiner::visitGetEleme
   return nullptr;
 }
 
-static bool isNeverEqualToUnescapedAlloc(Value *V) {
+static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo *TLI,
+                                         Instruction *AI) {
   if (isa<ConstantPointerNull>(V))
     return true;
   if (auto *LI = dyn_cast<LoadInst>(V))
     return isa<GlobalVariable>(LI->getPointerOperand());
-  return false;
+  // Two distinct allocations will never be equal.
+  // We rely on isAllocLikeFn being called with LookThroughBitCast = false:
+  // looking through bitcasts of V could make the check below return true
+  // even when V is just a bitcast of AI (e.g. AI cast i8* -> i32* -> i8*),
+  // i.e. when AI and V are the same allocation.
+  return isAllocLikeFn(V, TLI) && V != AI;
 }
 
 static bool
@@ -1894,12 +1900,12 @@ isAllocSiteRemovable(Instruction *AI, Sm
       case Instruction::ICmp: {
         ICmpInst *ICI = cast<ICmpInst>(I);
         // We can fold eq/ne comparisons with null to false/true, respectively.
-        // We fold comparisons in some conditions provided the alloc has not
-        // escaped.
+        // We also fold comparisons under certain conditions, provided the
+        // alloc has not escaped.
         if (!ICI->isEquality())
           return false;
         unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
-        if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex)))
+        if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
           return false;
         Users.emplace_back(I);
         continue;

Modified: llvm/trunk/test/Transforms/InstCombine/compare-unescaped.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/compare-unescaped.ll?rev=267214&r1=267213&r2=267214&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/compare-unescaped.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/compare-unescaped.ll Fri Apr 22 15:52:25 2016
@@ -40,3 +40,53 @@ define i32 @compare_and_call_with_deopt(
   ret i32 %rt 
 ; CHECK: ret i32 %rt
 }
+
+define i1 @compare_distinct_mallocs() {
+  %m = call i8* @malloc(i64 4)
+  %n = call i8* @malloc(i64 4)
+  %cmp = icmp eq i8* %m, %n
+  ret i1 %cmp
+  ; CHECK-LABEL: compare_distinct_mallocs
+  ; CHECK: ret i1 false
+}
+
+; The compare is folded to true since the compare folding looks through bitcasts.
+; The call to malloc and the bitcast instructions are then elided since there are no other uses of the malloc.
+define i1 @compare_samepointer_under_bitcast() {
+  %m = call i8* @malloc(i64 4)
+  %bc = bitcast i8* %m to i32*
+  %bcback = bitcast i32* %bc to i8*
+  %cmp = icmp eq i8* %m, %bcback
+  ret i1 %cmp
+; CHECK-LABEL: compare_samepointer_under_bitcast
+; CHECK: ret i1 true 
+}
+
+; The compare is folded to true since the compare folding looks through bitcasts.
+; The call to malloc cannot be elided here because %m escapes through the deopt bundle on the call to @f.
+define i1 @compare_samepointer_escaped() {
+  %m = call i8* @malloc(i64 4)
+  %bc = bitcast i8* %m to i32*
+  %bcback = bitcast i32* %bc to i8*
+  %cmp = icmp eq i8* %m, %bcback
+  call void @f() [ "deopt"(i8* %m) ]
+  ret i1 %cmp
+; CHECK-LABEL: compare_samepointer_escaped
+; CHECK-NEXT: %m = call i8* @malloc(i64 4)
+; CHECK-NEXT: call void @f() [ "deopt"(i8* %m) ]
+; CHECK: ret i1 true 
+}
+
+; The malloc call for %m cannot be elided since %m is used in the call to @f.
+; However, the compare can be folded to true since %n does not escape and %m, %n are distinct allocations.
+define i1 @compare_distinct_pointer_escape() {
+  %m = call i8* @malloc(i64 4)
+  %n = call i8* @malloc(i64 4)
+  tail call void @f() [ "deopt"(i8* %m) ]
+  %cmp = icmp ne i8* %m, %n
+  ret i1 %cmp
+; CHECK-LABEL: compare_distinct_pointer_escape
+; CHECK-NEXT: %m = call i8* @malloc(i64 4)
+; CHECK-NEXT: tail call void @f() [ "deopt"(i8* %m) ]
+; CHECK-NEXT: ret i1 true
+}




More information about the llvm-commits mailing list