[llvm] r363479 - [InstCombine] Add tests to show missing fold opportunity for "icmp and shift" (nfc).

Huihui Zhang via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 14 17:33:41 PDT 2019


Author: huihuiz
Date: Fri Jun 14 17:33:41 2019
New Revision: 363479

URL: http://llvm.org/viewvc/llvm-project?rev=363479&view=rev
Log:
[InstCombine] Add tests to show missing fold opportunity for "icmp and shift" (nfc).

Summary:
For the pattern icmp pred (and (sh X, Y), C), 0:

  When C is the signbit, expect to fold (X << Y) & signbit ==/!= 0 into
  (X << Y) >=/< 0, rather than into (X & (signbit l>> Y)) ==/!= 0.

  When C+1 is a power of 2, expect to fold (X << Y) & ~C ==/!= 0 into
  (X << Y) </>= C+1, rather than into (X & (~C l>> Y)) ==/!= 0.
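
  For illustration, take the shl case with i8 and C = 3 (so ~C = -4 and
  C+1 = 4). The hoped-for form is:

    %shl = shl i8 %x, %y
    %r = icmp ult i8 %shl, 4        ; (X << Y) u< C+1

  instead of the form currently produced (see the tests below):

    %lshr = lshr i8 -4, %y          ; ~C l>> Y
    %and = and i8 %lshr, %x
    %r = icmp eq i8 %and, 0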

For the pattern icmp pred (and X, (sh signbit, Y)), 0:

  Expect to fold (X & (signbit l>> Y)) ==/!= 0 into (X << Y) >=/< 0.
  Expect to fold (X & (signbit << Y)) ==/!= 0 into (X l>> Y) >=/< 0.
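
  For illustration (i8, assuming Y < 8): (signbit l>> Y) has only bit (7 - Y)
  set, and bit (7 - Y) of X is exactly the sign bit of X << Y, so

    %lshr = lshr i8 -128, %y
    %and = and i8 %lshr, %x
    %r = icmp eq i8 %and, 0

  could become

    %shl = shl i8 %x, %y
    %r = icmp sgt i8 %shl, -1       ; (X << Y) s>= 0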

  Reviewers: lebedev.ri, efriedma, spatel, craig.topper

  Reviewed By: lebedev.ri

  Subscribers: llvm-commits

  Tags: #llvm

  Differential Revision: https://reviews.llvm.org/D63025

Added:
    llvm/trunk/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll
    llvm/trunk/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll
    llvm/trunk/test/Transforms/InstCombine/shl-and-negC-icmpeq-zero.ll
    llvm/trunk/test/Transforms/InstCombine/shl-and-signbit-icmpeq-zero.ll
    llvm/trunk/test/Transforms/InstCombine/signbit-lshr-and-icmpeq-zero.ll
    llvm/trunk/test/Transforms/InstCombine/signbit-shl-and-icmpeq-zero.ll

Added: llvm/trunk/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll?rev=363479&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll Fri Jun 14 17:33:41 2019
@@ -0,0 +1,240 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; For the pattern ((X l>> Y) & ~C) ==/!= 0, when C+1 is a power of 2
+; it may be optimal to fold into (X l>> Y) </>= C+1
+; rather than into (X & (~C << Y)) ==/!= 0.
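+; e.g. for i8 with C = 3 (~C = -4): ((X l>> Y) & -4) == 0 holds exactly
+; when (X l>> Y) u< 4, since -4 keeps only bits 2 and above.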
+
+; Scalar tests
+
+define i1 @scalar_i8_lshr_and_negC_eq(i8 %x, i8 %y) {
+; CHECK-LABEL: @scalar_i8_lshr_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i8 -4, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i8 %x, %y
+  %and = and i8 %lshr, 252  ; ~3
+  %r = icmp eq i8 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i16_lshr_and_negC_eq(i16 %x, i16 %y) {
+; CHECK-LABEL: @scalar_i16_lshr_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i16 -128, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i16 %x, %y
+  %and = and i16 %lshr, 65408  ; ~127
+  %r = icmp eq i16 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_lshr_and_negC_eq(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 -262144, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 4294705152  ; ~262143
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i64_lshr_and_negC_eq(i64 %x, i64 %y) {
+; CHECK-LABEL: @scalar_i64_lshr_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 -8589934592, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i64 %x, %y
+  %and = and i64 %lshr, 18446744065119617024  ; ~8589934591
+  %r = icmp eq i64 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_lshr_and_negC_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_negC_ne(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 -262144, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 4294705152  ; ~262143
+  %r = icmp ne i32 %and, 0   ; check 'ne' predicate
+  ret i1 %r
+}
+
+; Vector tests
+
+define <4 x i1> @vec_4xi32_lshr_and_negC_eq(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_lshr_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <4 x i32> <i32 -8, i32 -8, i32 -8, i32 -8>, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[TMP2]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> %x, %y
+  %and = and <4 x i32> %lshr, <i32 4294967288, i32 4294967288, i32 4294967288, i32 4294967288>  ; ~7
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_lshr_and_negC_eq_undef1(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_lshr_and_negC_eq_undef1(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], <i32 -8, i32 undef, i32 -8, i32 -8>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> %x, %y
+  %and = and <4 x i32> %lshr, <i32 4294967288, i32 undef, i32 4294967288, i32 4294967288>  ; ~7
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_lshr_and_negC_eq_undef2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_lshr_and_negC_eq_undef2(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], <i32 -8, i32 -8, i32 -8, i32 -8>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 0, i32 0, i32 0, i32 undef>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> %x, %y
+  %and = and <4 x i32> %lshr, <i32 4294967288, i32 4294967288, i32 4294967288, i32 4294967288>  ; ~7
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 undef>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_lshr_and_negC_eq_undef3(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_lshr_and_negC_eq_undef3(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], <i32 -8, i32 -8, i32 undef, i32 -8>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 0, i32 0, i32 0, i32 undef>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> %x, %y
+  %and = and <4 x i32> %lshr, <i32 4294967288, i32 4294967288, i32 undef, i32 4294967288>  ; ~7
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 undef>
+  ret <4 x i1> %r
+}
+
+; Extra use
+
+; Fold happened
+define i1 @scalar_lshr_and_negC_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_lshr_and_negC_eq_extra_use_lshr(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[LSHR]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i32 [[LSHR]], 8
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %xor = xor i32 %lshr, %z  ; extra use of lshr
+  store i32 %xor, i32* %p
+  %and = and i32 %lshr, 4294967288  ; ~7
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_lshr_and_negC_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_lshr_and_negC_eq_extra_use_and(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], -8
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 4294967288  ; ~7
+  %mul = mul i32 %and, %z  ; extra use of and
+  store i32 %mul, i32* %p
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_lshr_and_negC_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+; CHECK-LABEL: @scalar_lshr_and_negC_eq_extra_use_lshr_and(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], -8
+; CHECK-NEXT:    store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[LSHR]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 4294967288  ; ~7
+  store i32 %and, i32* %p  ; extra use of and
+  %add = add i32 %lshr, %z  ; extra use of lshr
+  store i32 %add, i32* %q
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Negative tests
+
+; X is constant
+
+define i1 @scalar_i32_lshr_and_negC_eq_X_is_constant1(i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_negC_eq_X_is_constant1(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 12345, [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i32 [[LSHR]], 8
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 12345, %y
+  %and = and i32 %lshr, 4294967288  ; ~7
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_lshr_and_negC_eq_X_is_constant2(i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_negC_eq_X_is_constant2(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 268435456, [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i32 [[LSHR]], 8
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 268435456, %y
+  %and = and i32 %lshr, 4294967288  ; ~7
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Check 'slt' predicate
+
+define i1 @scalar_i32_lshr_and_negC_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_negC_slt(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i32 [[LSHR]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 4294967288  ; ~7
+  %r = icmp slt i32 %and, 0
+  ret i1 %r
+}
+
+; Compare with nonzero
+
+define i1 @scalar_i32_lshr_and_negC_eq_nonzero(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_negC_eq_nonzero(
+; CHECK-NEXT:    ret i1 false
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 4294967288  ; ~7
+  %r = icmp eq i32 %and, 1  ; should be comparing with 0
+  ret i1 %r
+}

Added: llvm/trunk/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll?rev=363479&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/lshr-and-signbit-icmpeq-zero.ll Fri Jun 14 17:33:41 2019
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; For the pattern ((X l>> Y) & signbit) ==/!= 0
+; it may be optimal to fold into (X l>> Y) >=/< 0
+; rather than into (X & (signbit << Y)) ==/!= 0.
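+; e.g. ((X l>> Y) & 0x80000000) == 0 simply tests that the sign bit of
+; (X l>> Y) is clear, i.e. that (X l>> Y) s>= 0.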
+
+; Scalar tests
+
+define i1 @scalar_i8_lshr_and_signbit_eq(i8 %x, i8 %y) {
+; CHECK-LABEL: @scalar_i8_lshr_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i8 -128, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i8 %x, %y
+  %and = and i8 %lshr, 128
+  %r = icmp eq i8 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i16_lshr_and_signbit_eq(i16 %x, i16 %y) {
+; CHECK-LABEL: @scalar_i16_lshr_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i16 -32768, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i16 %x, %y
+  %and = and i16 %lshr, 32768
+  %r = icmp eq i16 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_lshr_and_signbit_eq(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 2147483648
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i64_lshr_and_signbit_eq(i64 %x, i64 %y) {
+; CHECK-LABEL: @scalar_i64_lshr_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 -9223372036854775808, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i64 %x, %y
+  %and = and i64 %lshr, 9223372036854775808
+  %r = icmp eq i64 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_lshr_and_signbit_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_signbit_ne(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 2147483648
+  %r = icmp ne i32 %and, 0  ; check 'ne' predicate
+  ret i1 %r
+}
+
+; Vector tests
+
+define <4 x i1> @vec_4xi32_lshr_and_signbit_eq(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[TMP2]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> %x, %y
+  %and = and <4 x i32> %lshr, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_lshr_and_signbit_eq_undef1(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq_undef1(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], <i32 -2147483648, i32 undef, i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> %x, %y
+  %and = and <4 x i32> %lshr, <i32 2147483648, i32 undef, i32 2147483648, i32 2147483648>
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_lshr_and_signbit_eq_undef2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq_undef2(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 undef, i32 0, i32 0, i32 0>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> %x, %y
+  %and = and <4 x i32> %lshr, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
+  %r = icmp eq <4 x i32> %and, <i32 undef, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_lshr_and_signbit_eq_undef3(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_lshr_and_signbit_eq_undef3(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], <i32 -2147483648, i32 undef, i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 0, i32 0, i32 0, i32 undef>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> %x, %y
+  %and = and <4 x i32> %lshr, <i32 2147483648, i32 undef, i32 2147483648, i32 2147483648>
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 undef>
+  ret <4 x i1> %r
+}
+
+; Extra use
+
+; Fold happened
+define i1 @scalar_lshr_and_signbit_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_lshr_and_signbit_eq_extra_use_lshr(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[LSHR]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i32 [[LSHR]], -1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %xor = xor i32 %lshr, %z  ; extra use of lshr
+  store i32 %xor, i32* %p
+  %and = and i32 %lshr, 2147483648
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_lshr_and_signbit_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_lshr_and_signbit_eq_extra_use_and(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], -2147483648
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 2147483648
+  %mul = mul i32 %and, %z  ; extra use of and
+  store i32 %mul, i32* %p
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_lshr_and_signbit_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+; CHECK-LABEL: @scalar_lshr_and_signbit_eq_extra_use_lshr_and(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], -2147483648
+; CHECK-NEXT:    store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[LSHR]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 2147483648
+  store i32 %and, i32* %p  ; extra use of and
+  %add = add i32 %lshr, %z  ; extra use of lshr
+  store i32 %add, i32* %q
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Negative tests
+
+; X is constant
+
+define i1 @scalar_i32_lshr_and_signbit_eq_X_is_constant1(i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_signbit_eq_X_is_constant1(
+; CHECK-NEXT:    ret i1 true
+;
+  %lshr = lshr i32 12345, %y
+  %and = and i32 %lshr, 2147483648
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_lshr_and_signbit_eq_X_is_constant2(i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_signbit_eq_X_is_constant2(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i32 [[LSHR]], -1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %and = and i32 %lshr, 2147483648
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Check 'slt' predicate
+
+define i1 @scalar_i32_lshr_and_signbit_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_signbit_slt(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i32 [[LSHR]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 2147483648
+  %r = icmp slt i32 %and, 0
+  ret i1 %r
+}
+
+; Compare with nonzero
+
+define i1 @scalar_i32_lshr_and_signbit_eq_nonzero(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_lshr_and_signbit_eq_nonzero(
+; CHECK-NEXT:    ret i1 false
+;
+  %lshr = lshr i32 %x, %y
+  %and = and i32 %lshr, 2147483648
+  %r = icmp eq i32 %and, 1  ; should be comparing with 0
+  ret i1 %r
+}

Added: llvm/trunk/test/Transforms/InstCombine/shl-and-negC-icmpeq-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/shl-and-negC-icmpeq-zero.ll?rev=363479&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/shl-and-negC-icmpeq-zero.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/shl-and-negC-icmpeq-zero.ll Fri Jun 14 17:33:41 2019
@@ -0,0 +1,239 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; For the pattern ((X << Y) & ~C) ==/!= 0, when C+1 is a power of 2
+; it may be optimal to fold into (X << Y) </>= C+1
+; rather than into (X & (~C l>> Y)) ==/!= 0.
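+; e.g. for i8 with C = 3 (~C = -4): ((X << Y) & -4) == 0 holds exactly
+; when (X << Y) u< 4, since -4 keeps only bits 2 and above.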
+
+; Scalar tests
+
+define i1 @scalar_i8_shl_and_negC_eq(i8 %x, i8 %y) {
+; CHECK-LABEL: @scalar_i8_shl_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 -4, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i8 %x, %y
+  %and = and i8 %shl, 252  ; ~3
+  %r = icmp eq i8 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i16_shl_and_negC_eq(i16 %x, i16 %y) {
+; CHECK-LABEL: @scalar_i16_shl_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i16 -128, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i16 %x, %y
+  %and = and i16 %shl, 65408  ; ~127
+  %r = icmp eq i16 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_shl_and_negC_eq(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 -262144, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 4294705152  ; ~262143
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i64_shl_and_negC_eq(i64 %x, i64 %y) {
+; CHECK-LABEL: @scalar_i64_shl_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 -8589934592, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i64 %x, %y
+  %and = and i64 %shl, 18446744065119617024  ; ~8589934591
+  %r = icmp eq i64 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_shl_and_negC_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_negC_ne(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 -262144, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 4294705152  ; ~262143
+  %r = icmp ne i32 %and, 0   ; check 'ne' predicate
+  ret i1 %r
+}
+
+; Vector tests
+
+define <4 x i1> @vec_4xi32_shl_and_negC_eq(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_shl_and_negC_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> <i32 -8, i32 -8, i32 -8, i32 -8>, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[TMP2]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> %x, %y
+  %and = and <4 x i32> %shl, <i32 4294967288, i32 4294967288, i32 4294967288, i32 4294967288>  ; ~7
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_shl_and_negC_eq_undef1(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_shl_and_negC_eq_undef1(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], <i32 -8, i32 undef, i32 -8, i32 -8>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> %x, %y
+  %and = and <4 x i32> %shl, <i32 4294967288, i32 undef, i32 4294967288, i32 4294967288>  ; ~7
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_shl_and_negC_eq_undef2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_shl_and_negC_eq_undef2(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], <i32 -8, i32 -8, i32 -8, i32 -8>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 0, i32 0, i32 0, i32 undef>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> %x, %y
+  %and = and <4 x i32> %shl, <i32 4294967288, i32 4294967288, i32 4294967288, i32 4294967288>  ; ~7
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 undef>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_shl_and_negC_eq_undef3(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_shl_and_negC_eq_undef3(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], <i32 -8, i32 -8, i32 undef, i32 -8>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 0, i32 0, i32 0, i32 undef>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> %x, %y
+  %and = and <4 x i32> %shl, <i32 4294967288, i32 4294967288, i32 undef, i32 4294967288>  ; ~7
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 undef>
+  ret <4 x i1> %r
+}
+
+; Extra use
+
+; Fold happened
+define i1 @scalar_shl_and_negC_eq_extra_use_shl(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_shl_and_negC_eq_extra_use_shl(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i32 [[SHL]], 8
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %xor = xor i32 %shl, %z  ; extra use of shl
+  store i32 %xor, i32* %p
+  %and = and i32 %shl, 4294967288  ; ~7
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_shl_and_negC_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_shl_and_negC_eq_extra_use_and(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], -8
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 4294967288  ; ~7
+  %mul = mul i32 %and, %z  ; extra use of and
+  store i32 %mul, i32* %p
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_shl_and_negC_eq_extra_use_shl_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+; CHECK-LABEL: @scalar_shl_and_negC_eq_extra_use_shl_and(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], -8
+; CHECK-NEXT:    store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 4294967288  ; ~7
+  store i32 %and, i32* %p  ; extra use of and
+  %add = add i32 %shl, %z  ; extra use of shl
+  store i32 %add, i32* %q
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Negative tests
+
+; X is constant
+
+define i1 @scalar_i32_shl_and_negC_eq_X_is_constant1(i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_negC_eq_X_is_constant1(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 12345, [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i32 [[SHL]], 8
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 12345, %y
+  %and = and i32 %shl, 4294967288  ; ~7
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_shl_and_negC_eq_X_is_constant2(i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_negC_eq_X_is_constant2(
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i32 [[Y:%.*]], 3
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 1, %y
+  %and = and i32 %shl, 4294967288  ; ~7
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Check 'slt' predicate
+
+define i1 @scalar_i32_shl_and_negC_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_negC_slt(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 4294967288  ; ~7
+  %r = icmp slt i32 %and, 0
+  ret i1 %r
+}
+
+; Compare with nonzero
+
+define i1 @scalar_i32_shl_and_negC_eq_nonzero(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_negC_eq_nonzero(
+; CHECK-NEXT:    ret i1 false
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 4294967288  ; ~7
+  %r = icmp eq i32 %and, 1  ; should be comparing with 0
+  ret i1 %r
+}

Added: llvm/trunk/test/Transforms/InstCombine/shl-and-signbit-icmpeq-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/shl-and-signbit-icmpeq-zero.ll?rev=363479&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/shl-and-signbit-icmpeq-zero.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/shl-and-signbit-icmpeq-zero.ll Fri Jun 14 17:33:41 2019
@@ -0,0 +1,239 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; For the pattern ((X << Y) & signbit) ==/!= 0
+; it may be optimal to fold into (X << Y) >=/< 0
+; rather than into (X & (signbit l>> Y)) ==/!= 0.
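+; e.g. ((X << Y) & 0x80000000) == 0 simply tests that the sign bit of
+; (X << Y) is clear, i.e. that (X << Y) s>= 0.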
+
+; Scalar tests
+
+define i1 @scalar_i8_shl_and_signbit_eq(i8 %x, i8 %y) {
+; CHECK-LABEL: @scalar_i8_shl_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 -128, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i8 %x, %y
+  %and = and i8 %shl, 128
+  %r = icmp eq i8 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i16_shl_and_signbit_eq(i16 %x, i16 %y) {
+; CHECK-LABEL: @scalar_i16_shl_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i16 -32768, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i16 %x, %y
+  %and = and i16 %shl, 32768
+  %r = icmp eq i16 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_shl_and_signbit_eq(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 2147483648
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i64_shl_and_signbit_eq(i64 %x, i64 %y) {
+; CHECK-LABEL: @scalar_i64_shl_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 -9223372036854775808, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i64 %x, %y
+  %and = and i64 %shl, 9223372036854775808
+  %r = icmp eq i64 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_shl_and_signbit_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_signbit_ne(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[TMP2]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 2147483648
+  %r = icmp ne i32 %and, 0  ; check 'ne' predicate
+  ret i1 %r
+}
+
+; Vector tests
+
+define <4 x i1> @vec_4xi32_shl_and_signbit_eq(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[TMP2]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> %x, %y
+  %and = and <4 x i32> %shl, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_shl_and_signbit_eq_undef1(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq_undef1(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], <i32 -2147483648, i32 undef, i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> %x, %y
+  %and = and <4 x i32> %shl, <i32 2147483648, i32 undef, i32 2147483648, i32 2147483648>
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_shl_and_signbit_eq_undef2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq_undef2(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 undef, i32 0, i32 0, i32 0>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> %x, %y
+  %and = and <4 x i32> %shl, <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>
+  %r = icmp eq <4 x i32> %and, <i32 undef, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_shl_and_signbit_eq_undef3(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_shl_and_signbit_eq_undef3(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], <i32 -2147483648, i32 undef, i32 -2147483648, i32 -2147483648>
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 0, i32 0, i32 0, i32 undef>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> %x, %y
+  %and = and <4 x i32> %shl, <i32 2147483648, i32 undef, i32 2147483648, i32 2147483648>
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 undef>
+  ret <4 x i1> %r
+}
+
+; Extra use
+
+; Fold happened
+define i1 @scalar_shl_and_signbit_eq_extra_use_shl(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_shl_and_signbit_eq_extra_use_shl(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i32 [[SHL]], -1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %xor = xor i32 %shl, %z  ; extra use of shl
+  store i32 %xor, i32* %p
+  %and = and i32 %shl, 2147483648
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_shl_and_signbit_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_shl_and_signbit_eq_extra_use_and(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], -2147483648
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 2147483648
+  %mul = mul i32 %and, %z  ; extra use of and
+  store i32 %mul, i32* %p
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_shl_and_signbit_eq_extra_use_shl_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+; CHECK-LABEL: @scalar_shl_and_signbit_eq_extra_use_shl_and(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], -2147483648
+; CHECK-NEXT:    store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 2147483648
+  store i32 %and, i32* %p  ; extra use of and
+  %add = add i32 %shl, %z  ; extra use of shl
+  store i32 %add, i32* %q
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Negative tests
+
+; X is constant
+
+define i1 @scalar_i32_shl_and_signbit_eq_X_is_constant1(i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_signbit_eq_X_is_constant1(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 12345, [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i32 [[SHL]], -1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 12345, %y
+  %and = and i32 %shl, 2147483648
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_shl_and_signbit_eq_X_is_constant2(i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_signbit_eq_X_is_constant2(
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[Y:%.*]], 31
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 1, %y
+  %and = and i32 %shl, 2147483648
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Check 'slt' predicate
+
+define i1 @scalar_i32_shl_and_signbit_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_signbit_slt(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 2147483648
+  %r = icmp slt i32 %and, 0
+  ret i1 %r
+}
+
+; Compare with nonzero
+
+define i1 @scalar_i32_shl_and_signbit_eq_nonzero(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_shl_and_signbit_eq_nonzero(
+; CHECK-NEXT:    ret i1 false
+;
+  %shl = shl i32 %x, %y
+  %and = and i32 %shl, 2147483648
+  %r = icmp eq i32 %and, 1  ; should be comparing with 0
+  ret i1 %r
+}

Added: llvm/trunk/test/Transforms/InstCombine/signbit-lshr-and-icmpeq-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/signbit-lshr-and-icmpeq-zero.ll?rev=363479&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/signbit-lshr-and-icmpeq-zero.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/signbit-lshr-and-icmpeq-zero.ll Fri Jun 14 17:33:41 2019
@@ -0,0 +1,244 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; For the pattern (X & (signbit l>> Y)) ==/!= 0,
+; it may be optimal to fold into (X << Y) >=/< 0.
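+; e.g. for i32 with Y < 32: (signbit l>> Y) has only bit (31 - Y) set,
+; and bit (31 - Y) of X is exactly the sign bit of (X << Y).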
+
+; Scalar tests
+
+define i1 @scalar_i8_signbit_lshr_and_eq(i8 %x, i8 %y) {
+; CHECK-LABEL: @scalar_i8_signbit_lshr_and_eq(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i8 -128, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i8 128, %y
+  %and = and i8 %lshr, %x
+  %r = icmp eq i8 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i16_signbit_lshr_and_eq(i16 %x, i16 %y) {
+; CHECK-LABEL: @scalar_i16_signbit_lshr_and_eq(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i16 -32768, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i16 32768, %y
+  %and = and i16 %lshr, %x
+  %r = icmp eq i16 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_signbit_lshr_and_eq(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %and = and i32 %lshr, %x
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i64_signbit_lshr_and_eq(i64 %x, i64 %y) {
+; CHECK-LABEL: @scalar_i64_signbit_lshr_and_eq(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i64 -9223372036854775808, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i64 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i64 9223372036854775808, %y
+  %and = and i64 %lshr, %x
+  %r = icmp eq i64 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_signbit_lshr_and_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_lshr_and_ne(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %and = and i32 %lshr, %x
+  %r = icmp ne i32 %and, 0  ; check 'ne' predicate
+  ret i1 %r
+}
+
+; Vector tests
+
+define <4 x i1> @vec_4xi32_signbit_lshr_and_eq(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_signbit_lshr_and_eq(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>, %y
+  %and = and <4 x i32> %lshr, %x
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_signbit_lshr_and_eq_undef1(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_signbit_lshr_and_eq_undef1(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> <i32 -2147483648, i32 undef, i32 -2147483648, i32 2147473648>, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> <i32 2147483648, i32 undef, i32 2147483648, i32 2147473648>, %y
+  %and = and <4 x i32> %lshr, %x
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_signbit_lshr_and_eq_undef2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_signbit_lshr_and_eq_undef2(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 2147473648>, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 0, i32 0, i32 0, i32 undef>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147473648>, %y
+  %and = and <4 x i32> %lshr, %x
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 undef>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_signbit_lshr_and_eq_undef3(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_signbit_lshr_and_eq_undef3(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> <i32 -2147483648, i32 undef, i32 -2147483648, i32 2147473648>, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 undef, i32 0, i32 0, i32 0>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %lshr = lshr <4 x i32> <i32 2147483648, i32 undef, i32 2147483648, i32 2147473648>, %y
+  %and = and <4 x i32> %lshr, %x
+  %r = icmp eq <4 x i32> %and, <i32 undef, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+; Extra use
+
+; Fold happened
+define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_lshr(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_extra_use_lshr(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[LSHR]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %xor = xor i32 %lshr, %z  ; extra use of lshr
+  store i32 %xor, i32* %p
+  %and = and i32 %lshr, %x
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_extra_use_and(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %and = and i32 %lshr, %x
+  %mul = mul i32 %and, %z  ; extra use of and
+  store i32 %mul, i32* %p
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_i32_signbit_lshr_and_eq_extra_use_lshr_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_extra_use_lshr_and(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[LSHR]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %and = and i32 %lshr, %x
+  store i32 %and, i32* %p  ; extra use of and
+  %add = add i32 %lshr, %z  ; extra use of lshr
+  store i32 %add, i32* %q
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Negative tests
+
+; X is constant
+
+define i1 @scalar_i32_signbit_lshr_and_eq_X_is_constant1(i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_X_is_constant1(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], 12345
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %and = and i32 %lshr, 12345
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_signbit_lshr_and_eq_X_is_constant2(i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_X_is_constant2(
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[Y:%.*]], 31
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %and = and i32 %lshr, 1
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Check 'slt' predicate
+
+define i1 @scalar_i32_signbit_lshr_and_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_lshr_and_slt(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %and = and i32 %lshr, %x
+  %r = icmp slt i32 %and, 0
+  ret i1 %r
+}
+
+; Compare with nonzero
+
+define i1 @scalar_i32_signbit_lshr_and_eq_nonzero(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_lshr_and_eq_nonzero(
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[LSHR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %lshr = lshr i32 2147483648, %y
+  %and = and i32 %lshr, %x
+  %r = icmp eq i32 %and, 1  ; should be comparing with 0
+  ret i1 %r
+}

Added: llvm/trunk/test/Transforms/InstCombine/signbit-shl-and-icmpeq-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/signbit-shl-and-icmpeq-zero.ll?rev=363479&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/signbit-shl-and-icmpeq-zero.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/signbit-shl-and-icmpeq-zero.ll Fri Jun 14 17:33:41 2019
@@ -0,0 +1,247 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -instcombine -S | FileCheck %s
+
+; For the pattern (X & (signbit << Y)) ==/!= 0,
+; it may be optimal to fold into (X l>> Y) >=/< 0.
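+; e.g. for i32: (signbit << Y) is the sign bit itself when Y == 0 and zero
+; when Y > 0; either way the test agrees with (X l>> Y) s>= 0, since a
+; nonzero lshr amount always clears the sign bit.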
+
+; Scalar tests
+
+define i1 @scalar_i8_signbit_shl_and_eq(i8 %x, i8 %y) {
+; CHECK-LABEL: @scalar_i8_signbit_shl_and_eq(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i8 -128, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i8 128, %y
+  %and = and i8 %shl, %x
+  %r = icmp eq i8 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i16_signbit_shl_and_eq(i16 %x, i16 %y) {
+; CHECK-LABEL: @scalar_i16_signbit_shl_and_eq(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i16 -32768, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i16 32768, %y
+  %and = and i16 %shl, %x
+  %r = icmp eq i16 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_signbit_shl_and_eq(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 2147483648, %y
+  %and = and i32 %shl, %x
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i64_signbit_shl_and_eq(i64 %x, i64 %y) {
+; CHECK-LABEL: @scalar_i64_signbit_shl_and_eq(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i64 -9223372036854775808, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i64 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i64 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i64 9223372036854775808, %y
+  %and = and i64 %shl, %x
+  %r = icmp eq i64 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_signbit_shl_and_ne(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_shl_and_ne(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 2147483648, %y
+  %and = and i32 %shl, %x
+  %r = icmp ne i32 %and, 0  ; check 'ne' predicate
+  ret i1 %r
+}
+
+; Vector tests
+
+define <4 x i1> @vec_4xi32_signbit_shl_and_eq(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_signbit_shl_and_eq(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147483648>, %y
+  %and = and <4 x i32> %shl, %x
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_signbit_shl_and_eq_undef1(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_signbit_shl_and_eq_undef1(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> <i32 -2147483648, i32 undef, i32 -2147483648, i32 2147473648>, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> <i32 2147483648, i32 undef, i32 2147483648, i32 2147473648>, %y
+  %and = and <4 x i32> %shl, %x
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_signbit_shl_and_eq_undef2(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_signbit_shl_and_eq_undef2(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 2147473648>, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 0, i32 0, i32 0, i32 undef>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> <i32 2147483648, i32 2147483648, i32 2147483648, i32 2147473648>, %y
+  %and = and <4 x i32> %shl, %x
+  %r = icmp eq <4 x i32> %and, <i32 0, i32 0, i32 0, i32 undef>
+  ret <4 x i1> %r
+}
+
+define <4 x i1> @vec_4xi32_signbit_shl_and_eq_undef3(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: @vec_4xi32_signbit_shl_and_eq_undef3(
+; CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i32> <i32 -2147483648, i32 undef, i32 -2147483648, i32 2147473648>, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and <4 x i32> [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <4 x i32> [[AND]], <i32 undef, i32 0, i32 0, i32 0>
+; CHECK-NEXT:    ret <4 x i1> [[R]]
+;
+  %shl = shl <4 x i32> <i32 2147483648, i32 undef, i32 2147483648, i32 2147473648>, %y
+  %and = and <4 x i32> %shl, %x
+  %r = icmp eq <4 x i32> %and, <i32 undef, i32 0, i32 0, i32 0>
+  ret <4 x i1> %r
+}
+
+; Extra use
+
+; Fold happened
+define i1 @scalar_i32_signbit_shl_and_eq_extra_use_shl(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq_extra_use_shl(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[XOR]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 2147483648, %y
+  %xor = xor i32 %shl, %z  ; extra use of shl
+  store i32 %xor, i32* %p
+  %and = and i32 %shl, %x
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_i32_signbit_shl_and_eq_extra_use_and(i32 %x, i32 %y, i32 %z, i32* %p) {
+; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq_extra_use_and(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[AND]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[MUL]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 2147483648, %y
+  %and = and i32 %shl, %x
+  %mul = mul i32 %and, %z  ; extra use of and
+  store i32 %mul, i32* %p
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; No fold happened
+define i1 @scalar_i32_signbit_shl_and_eq_extra_use_shl_and(i32 %x, i32 %y, i32 %z, i32* %p, i32* %q) {
+; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq_extra_use_shl_and(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    store i32 [[AND]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT:    store i32 [[ADD]], i32* [[Q:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 2147483648, %y
+  %and = and i32 %shl, %x
+  store i32 %and, i32* %p  ; extra use of and
+  %add = add i32 %shl, %z  ; extra use of shl
+  store i32 %add, i32* %q
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Negative tests
+
+; X is constant
+
+define i1 @scalar_i32_signbit_shl_and_eq_X_is_constant1(i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq_X_is_constant1(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], 12345
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 2147483648, %y
+  %and = and i32 %shl, 12345
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+define i1 @scalar_i32_signbit_shl_and_eq_X_is_constant2(i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq_X_is_constant2(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], 1
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 2147483648, %y
+  %and = and i32 %shl, 1
+  %r = icmp eq i32 %and, 0
+  ret i1 %r
+}
+
+; Check 'slt' predicate
+
+define i1 @scalar_i32_signbit_shl_and_slt(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_shl_and_slt(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i32 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 2147483648, %y
+  %and = and i32 %shl, %x
+  %r = icmp slt i32 %and, 0
+  ret i1 %r
+}
+
+; Compare with nonzero
+
+define i1 @scalar_i32_signbit_shl_and_eq_nonzero(i32 %x, i32 %y) {
+; CHECK-LABEL: @scalar_i32_signbit_shl_and_eq_nonzero(
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[AND]], 1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %shl = shl i32 2147483648, %y
+  %and = and i32 %shl, %x
+  %r = icmp eq i32 %and, 1  ; should be comparing with 0
+  ret i1 %r
+}



