[llvm] 46ee652 - Revert "[InstSimplify] Remove known bits constant folding"

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Sun May 3 11:45:58 PDT 2020


Author: Nikita Popov
Date: 2020-05-03T20:45:10+02:00
New Revision: 46ee652c70f7922c5f540c4778c65ac0bb79191f

URL: https://github.com/llvm/llvm-project/commit/46ee652c70f7922c5f540c4778c65ac0bb79191f
DIFF: https://github.com/llvm/llvm-project/commit/46ee652c70f7922c5f540c4778c65ac0bb79191f.diff

LOG: Revert "[InstSimplify] Remove known bits constant folding"

This reverts commit 08556afc54e7ddfa7cc2fdd69c615ad417722517.

This breaks some AMDGPU tests.

Added: 
    llvm/test/Transforms/InstSimplify/assume.ll

Modified: 
    llvm/lib/Analysis/InstructionSimplify.cpp
    llvm/test/Analysis/ValueTracking/knownzero-shift.ll
    llvm/test/Transforms/GVN/PRE/volatile.ll
    llvm/test/Transforms/InstSimplify/call.ll
    llvm/test/Transforms/InstSimplify/or.ll
    llvm/test/Transforms/InstSimplify/shift-knownbits.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index ef7f35c90861..7de4a0744c29 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -5600,6 +5600,9 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
     break;
   case Instruction::Call: {
     Result = SimplifyCall(cast<CallInst>(I), Q);
+    // Don't perform known bits simplification below for musttail calls.
+    if (cast<CallInst>(I)->isMustTailCall())
+      return Result;
     break;
   }
   case Instruction::Freeze:
@@ -5617,6 +5620,14 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
     break;
   }
 
+  // In general, it is possible for computeKnownBits to determine all bits in a
+  // value even when the operands are not all constants.
+  if (!Result && I->getType()->isIntOrIntVectorTy()) {
+    KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
+    if (Known.isConstant())
+      Result = ConstantInt::get(I->getType(), Known.getConstant());
+  }
+
   /// If called on unreachable code, the above logic may report that the
   /// instruction simplified to itself.  Make life easier for users by
   /// detecting that case here, returning a safe value instead.

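The hunk above restores the final known-bits fold in SimplifyInstruction:
when computeKnownBits can prove every bit of an integer-typed result, the
instruction simplifies to that constant even though its operands are not
constants. A minimal sketch of the effect, based on the @shl_shl test
restored in knownzero-shift.ll below, where the two shift amounts add up to
the full 32-bit width:

define i32 @shl_shl(i32 %A) {
  %B = shl i32 %A, 6   ; the low 6 bits of %B are known to be zero
  %C = shl i32 %B, 28  ; every bit of %C is therefore known to be zero
  ret i32 %C           ; with the fold restored, -instsimplify gives "ret i32 0"
}
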
diff --git a/llvm/test/Analysis/ValueTracking/knownzero-shift.ll b/llvm/test/Analysis/ValueTracking/knownzero-shift.ll
index 0dcd828aa33f..4ceb822afa18 100644
--- a/llvm/test/Analysis/ValueTracking/knownzero-shift.ll
+++ b/llvm/test/Analysis/ValueTracking/knownzero-shift.ll
@@ -15,15 +15,9 @@ define i1 @test(i8 %p, i8* %pq) {
 
 !0 = !{ i8 1, i8 5 }
 
-; The following cases only get folded by InstCombine,
-; see InstCombine/shift-shift.ll. If we wanted to,
-; we could explicitly handle them in InstSimplify as well.
-
 define i32 @shl_shl(i32 %A) {
 ; CHECK-LABEL: @shl_shl(
-; CHECK-NEXT:    [[B:%.*]] = shl i32 [[A:%.*]], 6
-; CHECK-NEXT:    [[C:%.*]] = shl i32 [[B]], 28
-; CHECK-NEXT:    ret i32 [[C]]
+; CHECK-NEXT:    ret i32 0
 ;
   %B = shl i32 %A, 6
   %C = shl i32 %B, 28
@@ -32,9 +26,7 @@ define i32 @shl_shl(i32 %A) {
 
 define <2 x i33> @shl_shl_splat_vec(<2 x i33> %A) {
 ; CHECK-LABEL: @shl_shl_splat_vec(
-; CHECK-NEXT:    [[B:%.*]] = shl <2 x i33> [[A:%.*]], <i33 5, i33 5>
-; CHECK-NEXT:    [[C:%.*]] = shl <2 x i33> [[B]], <i33 28, i33 28>
-; CHECK-NEXT:    ret <2 x i33> [[C]]
+; CHECK-NEXT:    ret <2 x i33> zeroinitializer
 ;
   %B = shl <2 x i33> %A, <i33 5, i33 5>
   %C = shl <2 x i33> %B, <i33 28, i33 28>
@@ -45,7 +37,7 @@ define <2 x i33> @shl_shl_splat_vec(<2 x i33> %A) {
 
 define <2 x i33> @shl_shl_vec(<2 x i33> %A) {
 ; CHECK-LABEL: @shl_shl_vec(
-; CHECK-NEXT:    [[B:%.*]] = shl <2 x i33> [[A:%.*]], <i33 6, i33 5>
+; CHECK-NEXT:    [[B:%.*]] = shl <2 x i33> %A, <i33 6, i33 5>
 ; CHECK-NEXT:    [[C:%.*]] = shl <2 x i33> [[B]], <i33 27, i33 28>
 ; CHECK-NEXT:    ret <2 x i33> [[C]]
 ;
@@ -56,9 +48,7 @@ define <2 x i33> @shl_shl_vec(<2 x i33> %A) {
 
 define i232 @lshr_lshr(i232 %A) {
 ; CHECK-LABEL: @lshr_lshr(
-; CHECK-NEXT:    [[B:%.*]] = lshr i232 [[A:%.*]], 231
-; CHECK-NEXT:    [[C:%.*]] = lshr i232 [[B]], 1
-; CHECK-NEXT:    ret i232 [[C]]
+; CHECK-NEXT:    ret i232 0
 ;
   %B = lshr i232 %A, 231
   %C = lshr i232 %B, 1
@@ -67,9 +57,7 @@ define i232 @lshr_lshr(i232 %A) {
 
 define <2 x i32> @lshr_lshr_splat_vec(<2 x i32> %A) {
 ; CHECK-LABEL: @lshr_lshr_splat_vec(
-; CHECK-NEXT:    [[B:%.*]] = lshr <2 x i32> [[A:%.*]], <i32 28, i32 28>
-; CHECK-NEXT:    [[C:%.*]] = lshr <2 x i32> [[B]], <i32 4, i32 4>
-; CHECK-NEXT:    ret <2 x i32> [[C]]
+; CHECK-NEXT:    ret <2 x i32> zeroinitializer
 ;
   %B = lshr <2 x i32> %A, <i32 28, i32 28>
   %C = lshr <2 x i32> %B, <i32 4, i32 4>
@@ -78,9 +66,7 @@ define <2 x i32> @lshr_lshr_splat_vec(<2 x i32> %A) {
 
 define <2 x i32> @lshr_lshr_vec(<2 x i32> %A) {
 ; CHECK-LABEL: @lshr_lshr_vec(
-; CHECK-NEXT:    [[B:%.*]] = lshr <2 x i32> [[A:%.*]], <i32 29, i32 28>
-; CHECK-NEXT:    [[C:%.*]] = lshr <2 x i32> [[B]], <i32 4, i32 5>
-; CHECK-NEXT:    ret <2 x i32> [[C]]
+; CHECK-NEXT:    ret <2 x i32> zeroinitializer
 ;
   %B = lshr <2 x i32> %A, <i32 29, i32 28>
   %C = lshr <2 x i32> %B, <i32 4, i32 5>

diff --git a/llvm/test/Transforms/GVN/PRE/volatile.ll b/llvm/test/Transforms/GVN/PRE/volatile.ll
index 6fd1b096363d..552f8dce7833 100644
--- a/llvm/test/Transforms/GVN/PRE/volatile.ll
+++ b/llvm/test/Transforms/GVN/PRE/volatile.ll
@@ -197,17 +197,14 @@ exit:
   ret i32 %add
 }
 
-; This test checks that we don't optimize away instructions that are
-; simplified by SimplifyInstruction(), but are not trivially dead.
-
 define i32 @test9(i32* %V) {
 ; CHECK-LABEL: @test9(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[LOAD:%.*]] = call i32 undef()
-; CHECK-NEXT:    ret i32 undef
+; CHECK-NEXT:    [[LOAD:%.*]] = load volatile i32, i32* [[V:%.*]], !range !0
+; CHECK-NEXT:    ret i32 0
 ;
 entry:
-  %load = call i32 undef()
+  %load = load volatile i32, i32* %V, !range !0
   ret i32 %load
 }
 

diff --git a/llvm/test/Transforms/InstSimplify/assume.ll b/llvm/test/Transforms/InstSimplify/assume.ll
new file mode 100644
index 000000000000..a43f90adee37
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/assume.ll
@@ -0,0 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -instsimplify -S < %s 2>&1 -pass-remarks-analysis=.* | FileCheck %s
+
+; Verify that warnings are emitted for the 2nd and 3rd tests.
+
+; CHECK: remark: /tmp/s.c:1:13: Detected conflicting code assumptions.
+; CHECK: remark: /tmp/s.c:4:10: Detected conflicting code assumptions.
+; CHECK: remark: /tmp/s.c:5:50: Detected conflicting code assumptions.
+
+define void @test1() {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:    ret void
+;
+  call void @llvm.assume(i1 1)
+  ret void
+
+}
+
+; The alloca guarantees that the low bits of %a are zero because of alignment.
+; The assume says the opposite. The assume is processed last, so that's the
+; return value. There's no way to win (we can't undo transforms that happened
+; based on half-truths), so just don't crash.
+
+define i64 @PR31809() !dbg !7 {
+; CHECK-LABEL: @PR31809(
+; CHECK-NEXT:    ret i64 3
+;
+  %a = alloca i32
+  %t1 = ptrtoint i32* %a to i64, !dbg !9
+  %cond = icmp eq i64 %t1, 3
+  call void @llvm.assume(i1 %cond)
+  ret i64 %t1
+}
+
+; Similar to above: there's no way to know which assumption is truthful,
+; so just don't crash.
+
+define i8 @conflicting_assumptions(i8 %x) !dbg !10 {
+; CHECK-LABEL: @conflicting_assumptions(
+; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 1, !dbg !10
+; CHECK-NEXT:    call void @llvm.assume(i1 false)
+; CHECK-NEXT:    [[COND2:%.*]] = icmp eq i8 [[X]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[COND2]])
+; CHECK-NEXT:    ret i8 [[ADD]]
+;
+  %add = add i8 %x, 1, !dbg !11
+  %cond1 = icmp eq i8 %x, 3
+  call void @llvm.assume(i1 %cond1)
+  %cond2 = icmp eq i8 %x, 4
+  call void @llvm.assume(i1 %cond2)
+  ret i8 %add
+}
+
+; Another case of conflicting assumptions. This would crash because we'd
+; try to set more known bits than existed in the known bits struct.
+
+define void @PR36270(i32 %b) !dbg !13 {
+; CHECK-LABEL: @PR36270(
+; CHECK-NEXT:    tail call void @llvm.assume(i1 false)
+; CHECK-NEXT:    unreachable
+;
+  %B7 = xor i32 -1, 2147483647
+  %and1 = and i32 %b, 3
+  %B12 = lshr i32 %B7, %and1, !dbg !14
+  %C1 = icmp ult i32 %and1, %B12
+  tail call void @llvm.assume(i1 %C1)
+  %cmp2 = icmp eq i32 0, %B12
+  tail call void @llvm.assume(i1 %cmp2)
+  unreachable
+}
+
+declare void @llvm.assume(i1) nounwind
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4, !5}
+!llvm.ident = !{!6}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 4.0.0 (trunk 282540) (llvm/trunk 282542)", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly, enums: !2)
+!1 = !DIFile(filename: "/tmp/s.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{i32 1, !"PIC Level", i32 2}
+!6 = !{!"clang version 4.0.0 (trunk 282540) (llvm/trunk 282542)"}
+!7 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !8, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !2)
+!8 = !DISubroutineType(types: !2)
+!9 = !DILocation(line: 1, column: 13, scope: !7)
+!10 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 3, type: !8, isLocal: false, isDefinition: true, scopeLine: 3, isOptimized: true, unit: !0, retainedNodes: !2)
+!11 = !DILocation(line: 4, column: 10, scope: !10)
+!12 = !DILocation(line: 4, column: 3, scope: !10)
+!13 = distinct !DISubprogram(name: "PR36270", scope: !1, file: !1, line: 3, type: !8, isLocal: false, isDefinition: true, scopeLine: 3, isOptimized: true, unit: !0, retainedNodes: !2)
+!14 = !DILocation(line: 5, column: 50, scope: !13)
+

diff --git a/llvm/test/Transforms/InstSimplify/call.ll b/llvm/test/Transforms/InstSimplify/call.ll
index 9d0147b5ba52..07fdcdbbd5e6 100644
--- a/llvm/test/Transforms/InstSimplify/call.ll
+++ b/llvm/test/Transforms/InstSimplify/call.ll
@@ -988,7 +988,7 @@ declare i8* @passthru_p8(i8* returned)
 define i32 @returned_const_int_arg() {
 ; CHECK-LABEL: @returned_const_int_arg(
 ; CHECK-NEXT:    [[X:%.*]] = call i32 @passthru_i32(i32 42)
-; CHECK-NEXT:    ret i32 [[X]]
+; CHECK-NEXT:    ret i32 42
 ;
   %x = call i32 @passthru_i32(i32 42)
   ret i32 %x

diff --git a/llvm/test/Transforms/InstSimplify/or.ll b/llvm/test/Transforms/InstSimplify/or.ll
index 465b30c9cf94..7369cddf2fbe 100644
--- a/llvm/test/Transforms/InstSimplify/or.ll
+++ b/llvm/test/Transforms/InstSimplify/or.ll
@@ -98,17 +98,10 @@ define i8 @test10(i8 %A) {
   ret i8 %D
 }
 
-; The following two cases only get folded by InstCombine,
-; see InstCombine/or-xor.ll.
-
 ; (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
 define i8 @test11(i8 %A) {
 ; CHECK-LABEL: @test11(
-; CHECK-NEXT:    [[B:%.*]] = or i8 [[A:%.*]], -2
-; CHECK-NEXT:    [[C:%.*]] = xor i8 [[B]], 13
-; CHECK-NEXT:    [[D:%.*]] = or i8 [[C]], 1
-; CHECK-NEXT:    [[E:%.*]] = xor i8 [[D]], 12
-; CHECK-NEXT:    ret i8 [[E]]
+; CHECK-NEXT:    ret i8 -1
 ;
   %B = or i8 %A, -2
   %C = xor i8 %B, 13
@@ -119,12 +112,7 @@ define i8 @test11(i8 %A) {
 
 define i8 @test11v(<2 x i8> %A) {
 ; CHECK-LABEL: @test11v(
-; CHECK-NEXT:    [[B:%.*]] = or <2 x i8> [[A:%.*]], <i8 -2, i8 0>
-; CHECK-NEXT:    [[CV:%.*]] = xor <2 x i8> [[B]], <i8 13, i8 13>
-; CHECK-NEXT:    [[C:%.*]] = extractelement <2 x i8> [[CV]], i32 0
-; CHECK-NEXT:    [[D:%.*]] = or i8 [[C]], 1
-; CHECK-NEXT:    [[E:%.*]] = xor i8 [[D]], 12
-; CHECK-NEXT:    ret i8 [[E]]
+; CHECK-NEXT:    ret i8 -1
 ;
   %B = or <2 x i8> %A, <i8 -2, i8 0>
   %CV = xor <2 x i8> %B, <i8 13, i8 13>

diff --git a/llvm/test/Transforms/InstSimplify/shift-knownbits.ll b/llvm/test/Transforms/InstSimplify/shift-knownbits.ll
index c023048fb538..66e987182190 100644
--- a/llvm/test/Transforms/InstSimplify/shift-knownbits.ll
+++ b/llvm/test/Transforms/InstSimplify/shift-knownbits.ll
@@ -145,8 +145,7 @@ define i1 @shl_i1(i1 %a, i1 %b) {
   ret i1 %shl
 }
 
-; The following cases only get folded by InstCombine,
-; see InstCombine/lshr.ll.
+; Simplify count leading/trailing zeros to zero if all valid bits are shifted out.
 
 declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
 declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
@@ -155,9 +154,7 @@ declare <2 x i8> @llvm.ctlz.v2i8(<2 x i8>, i1) nounwind readnone
 
 define i32 @lshr_ctlz_zero_is_undef(i32 %x) {
 ; CHECK-LABEL: @lshr_ctlz_zero_is_undef(
-; CHECK-NEXT:    [[CT:%.*]] = call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 true)
-; CHECK-NEXT:    [[SH:%.*]] = lshr i32 [[CT]], 5
-; CHECK-NEXT:    ret i32 [[SH]]
+; CHECK-NEXT:    ret i32 0
 ;
   %ct = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
   %sh = lshr i32 %ct, 5
@@ -166,9 +163,7 @@ define i32 @lshr_ctlz_zero_is_undef(i32 %x) {
 
 define i32 @lshr_cttz_zero_is_undef(i32 %x) {
 ; CHECK-LABEL: @lshr_cttz_zero_is_undef(
-; CHECK-NEXT:    [[CT:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
-; CHECK-NEXT:    [[SH:%.*]] = lshr i32 [[CT]], 5
-; CHECK-NEXT:    ret i32 [[SH]]
+; CHECK-NEXT:    ret i32 0
 ;
   %ct = call i32 @llvm.cttz.i32(i32 %x, i1 true)
   %sh = lshr i32 %ct, 5
@@ -177,9 +172,7 @@ define i32 @lshr_cttz_zero_is_undef(i32 %x) {
 
 define <2 x i8> @lshr_ctlz_zero_is_undef_splat_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @lshr_ctlz_zero_is_undef_splat_vec(
-; CHECK-NEXT:    [[CT:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> [[X:%.*]], i1 true)
-; CHECK-NEXT:    [[SH:%.*]] = lshr <2 x i8> [[CT]], <i8 3, i8 3>
-; CHECK-NEXT:    ret <2 x i8> [[SH]]
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
 ;
   %ct = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %x, i1 true)
   %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
@@ -188,10 +181,7 @@ define <2 x i8> @lshr_ctlz_zero_is_undef_splat_vec(<2 x i8> %x) {
 
 define i8 @lshr_ctlz_zero_is_undef_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @lshr_ctlz_zero_is_undef_vec(
-; CHECK-NEXT:    [[CT:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> [[X:%.*]], i1 true)
-; CHECK-NEXT:    [[SH:%.*]] = lshr <2 x i8> [[CT]], <i8 3, i8 0>
-; CHECK-NEXT:    [[EX:%.*]] = extractelement <2 x i8> [[SH]], i32 0
-; CHECK-NEXT:    ret i8 [[EX]]
+; CHECK-NEXT:    ret i8 0
 ;
   %ct = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %x, i1 true)
   %sh = lshr <2 x i8> %ct, <i8 3, i8 0>
@@ -201,9 +191,7 @@ define i8 @lshr_ctlz_zero_is_undef_vec(<2 x i8> %x) {
 
 define <2 x i8> @lshr_cttz_zero_is_undef_splat_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @lshr_cttz_zero_is_undef_splat_vec(
-; CHECK-NEXT:    [[CT:%.*]] = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> [[X:%.*]], i1 true)
-; CHECK-NEXT:    [[SH:%.*]] = lshr <2 x i8> [[CT]], <i8 3, i8 3>
-; CHECK-NEXT:    ret <2 x i8> [[SH]]
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
 ;
   %ct = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 true)
   %sh = lshr <2 x i8> %ct, <i8 3, i8 3>
@@ -212,10 +200,7 @@ define <2 x i8> @lshr_cttz_zero_is_undef_splat_vec(<2 x i8> %x) {
 
 define i8 @lshr_cttz_zero_is_undef_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @lshr_cttz_zero_is_undef_vec(
-; CHECK-NEXT:    [[CT:%.*]] = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> [[X:%.*]], i1 true)
-; CHECK-NEXT:    [[SH:%.*]] = lshr <2 x i8> [[CT]], <i8 3, i8 0>
-; CHECK-NEXT:    [[EX:%.*]] = extractelement <2 x i8> [[SH]], i32 0
-; CHECK-NEXT:    ret i8 [[EX]]
+; CHECK-NEXT:    ret i8 0
 ;
   %ct = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 true)
   %sh = lshr <2 x i8> %ct, <i8 3, i8 0>


        

