[llvm] f1106ef - [InstCombine] Remove computeKnownBits() fold for returns

Nikita Popov via llvm-commits <llvm-commits at lists.llvm.org>
Tue May 30 03:16:59 PDT 2023


Author: Nikita Popov
Date: 2023-05-30T12:16:51+02:00
New Revision: f1106ef6c9d14d5b516ec352279aeee8f9d12818

URL: https://github.com/llvm/llvm-project/commit/f1106ef6c9d14d5b516ec352279aeee8f9d12818
DIFF: https://github.com/llvm/llvm-project/commit/f1106ef6c9d14d5b516ec352279aeee8f9d12818.diff

LOG: [InstCombine] Remove computeKnownBits() fold for returns

We try to constant-fold via computeKnownBits() with context for return
instructions only. Everywhere else, we rely on SimplifyDemandedBits() to
fold instructions whose known bits are constant.

The presence of this special fold for returns is dangerous, because it
makes our tests lie about what works and what doesn't. Tests are usually
written by returning the result we're interested in, so they go through
this separate code path, which is not used for anything else. This patch
removes the special fold.

This primarily regresses patterns of the style "assume(x); return x".
The responsibility for handling such patterns lies with passes like
EarlyCSE/GVN anyway, which handle them reliably, and not just for
returns.
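
For reference, a minimal IR sketch of the regressed pattern (illustrative
only, not taken from the patch; the function name is made up):

    declare void @llvm.assume(i1)

    define i32 @assume_then_return(i32 %x) {
      ; InstCombine no longer folds this return to `ret i32 4`;
      ; GVN's assume-based equality propagation is expected to do
      ; that instead, and for all uses of %x, not just returns.
      %cmp = icmp eq i32 %x, 4
      call void @llvm.assume(i1 %cmp)
      ret i32 %x
    }

Running e.g. `opt -passes=gvn` over this should still fold the return to
`ret i32 4`.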

Differential Revision: https://reviews.llvm.org/D151099

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
    llvm/test/Transforms/InstCombine/assume.ll
    llvm/test/Transforms/InstCombine/known-phi-br.ll
    llvm/test/Transforms/InstCombine/zext-or-icmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 2af6ba5c81822..682005282d92a 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2471,25 +2471,7 @@ static bool isMustTailCall(Value *V) {
 }
 
 Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {
-  if (RI.getNumOperands() == 0) // ret void
-    return nullptr;
-
-  Value *ResultOp = RI.getOperand(0);
-  Type *VTy = ResultOp->getType();
-  if (!VTy->isIntegerTy() || isa<Constant>(ResultOp))
-    return nullptr;
-
-  // Don't replace result of musttail calls.
-  if (isMustTailCall(ResultOp))
-    return nullptr;
-
-  // There might be assume intrinsics dominating this return that completely
-  // determine the value. If so, constant fold it.
-  KnownBits Known = computeKnownBits(ResultOp, 0, &RI);
-  if (Known.isConstant())
-    return replaceOperand(RI, 0,
-        Constant::getIntegerValue(VTy, Known.getConstant()));
-
+  // Nothing for now.
   return nullptr;
 }
 

diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
index 467ecec60a3f5..83ff0e3a392dd 100644
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=instcombine -S  -instcombine-infinite-loop-threshold=2  | FileCheck --check-prefixes=CHECK,DEFAULT %s
-; RUN: opt < %s -passes=instcombine --enable-knowledge-retention -S  -instcombine-infinite-loop-threshold=2  | FileCheck --check-prefixes=CHECK,BUNDLES %s
+; RUN: opt < %s -passes=instcombine -S  -instcombine-infinite-loop-threshold=3  | FileCheck --check-prefixes=CHECK,DEFAULT %s
+; RUN: opt < %s -passes=instcombine --enable-knowledge-retention -S  -instcombine-infinite-loop-threshold=3  | FileCheck --check-prefixes=CHECK,BUNDLES %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -60,7 +60,7 @@ define i32 @simple(i32 %a) #1 {
 ; CHECK-LABEL: @simple(
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 4
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i32 4
+; CHECK-NEXT:    ret i32 [[A]]
 ;
   %cmp = icmp eq i32 %a, 4
   tail call void @llvm.assume(i1 %cmp)
@@ -204,7 +204,8 @@ define i32 @icmp1(i32 %a) #0 {
 ; CHECK-LABEL: @icmp1(
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT:    ret i32 1
+; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT:    ret i32 [[CONV]]
 ;
   %cmp = icmp sgt i32 %a, 5
   tail call void @llvm.assume(i1 %cmp)
@@ -231,7 +232,7 @@ define i1 @assume_not(i1 %cond) {
 ; CHECK-LABEL: @assume_not(
 ; CHECK-NEXT:    [[NOTCOND:%.*]] = xor i1 [[COND:%.*]], true
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[NOTCOND]])
-; CHECK-NEXT:    ret i1 false
+; CHECK-NEXT:    ret i1 [[COND]]
 ;
   %notcond = xor i1 %cond, true
   call void @llvm.assume(i1 %notcond)
@@ -382,10 +383,7 @@ define i1 @nonnull5(ptr %a) {
 
 define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) {
 ; CHECK-LABEL: @assumption_conflicts_with_known_bits(
-; CHECK-NEXT:    [[AND1:%.*]] = and i32 [[B:%.*]], 3
 ; CHECK-NEXT:    tail call void @llvm.assume(i1 false)
-; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[AND1]], 0
-; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP2]])
 ; CHECK-NEXT:    ret i32 0
 ;
   %and1 = and i32 %b, 3
@@ -451,7 +449,7 @@ define i1 @nonnull3A(ptr %a, i1 %control) {
 ; DEFAULT:       taken:
 ; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
 ; DEFAULT-NEXT:    call void @llvm.assume(i1 [[CMP]])
-; DEFAULT-NEXT:    ret i1 true
+; DEFAULT-NEXT:    ret i1 [[CMP]]
 ; DEFAULT:       not_taken:
 ; DEFAULT-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
 ; DEFAULT-NEXT:    ret i1 [[RVAL_2]]
@@ -487,7 +485,7 @@ define i1 @nonnull3B(ptr %a, i1 %control) {
 ; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(ptr [[LOAD]]) ]
-; CHECK-NEXT:    ret i1 true
+; CHECK-NEXT:    ret i1 [[CMP]]
 ; CHECK:       not_taken:
 ; CHECK-NEXT:    ret i1 [[CONTROL]]
 ;

diff --git a/llvm/test/Transforms/InstCombine/known-phi-br.ll b/llvm/test/Transforms/InstCombine/known-phi-br.ll
index 64d3344eb2066..1ad0ed42d8d34 100644
--- a/llvm/test/Transforms/InstCombine/known-phi-br.ll
+++ b/llvm/test/Transforms/InstCombine/known-phi-br.ll
@@ -15,7 +15,8 @@ define i64 @limit_i64_eq_7(i64 %x) {
 ; CHECK:       body:
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    ret i64 7
+; CHECK-NEXT:    [[RES:%.*]] = phi i64 [ [[X]], [[ENTRY:%.*]] ], [ 7, [[BODY]] ]
+; CHECK-NEXT:    ret i64 [[RES]]
 ;
 entry:
   %cmp = icmp eq i64 %x, 7
@@ -37,7 +38,8 @@ define i64 @limit_i64_ne_255(i64 %x) {
 ; CHECK:       body:
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
-; CHECK-NEXT:    ret i64 255
+; CHECK-NEXT:    [[RES:%.*]] = phi i64 [ [[X]], [[ENTRY:%.*]] ], [ 255, [[BODY]] ]
+; CHECK-NEXT:    ret i64 [[RES]]
 ;
 entry:
   %cmp = icmp ne i64 %x, 255

diff --git a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
index 655c2e7c083a9..7d57cd21f4e83 100644
--- a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
@@ -251,7 +251,7 @@ define i1 @PR51762(ptr %i, i32 %t0, i16 %t1, ptr %p, ptr %d, ptr %f, i32 %p2, i1
 ; CHECK-NEXT:    store i32 [[SROA38]], ptr [[D]], align 8
 ; CHECK-NEXT:    [[R:%.*]] = icmp ult i64 [[INSERT_INSERT41]], [[CONV19]]
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[R]])
-; CHECK-NEXT:    ret i1 true
+; CHECK-NEXT:    ret i1 [[R]]
 ;
 entry:
   br label %for.cond

