[llvm] [InstCombine] Canonicalize Bit Testing by Shifting to Sign Bit (PR #101822)

Marius Kamp via llvm-commits llvm-commits at lists.llvm.org
Sat Aug 10 23:59:34 PDT 2024


https://github.com/mskamp updated https://github.com/llvm/llvm-project/pull/101822

From 27210f0ab8156a0f2e61355d69f34975698a6e55 Mon Sep 17 00:00:00 2001
From: Marius Kamp <msk at posteo.org>
Date: Tue, 23 Jul 2024 05:57:17 +0200
Subject: [PATCH 1/2] [InstCombine] Add Tests for Testing Bits; NFC

---
 .../Transforms/InstCombine/icmp-and-shift.ll  | 187 ++++++++++++++++++
 1 file changed, 187 insertions(+)

diff --git a/llvm/test/Transforms/InstCombine/icmp-and-shift.ll b/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
index 08d23e84c39600..eca31dc730948c 100644
--- a/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
@@ -606,3 +606,190 @@ define i1 @fold_ne_rhs_fail_shift_not_1s(i8 %x, i8 %yy) {
   %r = icmp ne i8 %and, 0
   ret i1 %r
 }
+
+define i1 @test_shl_sub_bw_minus_1_slt_0(i32 %a, i32 %b) {
+; CHECK-LABEL: @test_shl_sub_bw_minus_1_slt_0(
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %sub = sub i32 31, %b
+  %shl = shl i32 %a, %sub
+  %cmp = icmp slt i32 %shl, 0
+  ret i1 %cmp
+}
+
+define i1 @test_const_shl_sub_bw_minus_1_slt_0(i32 %b) {
+; CHECK-LABEL: @test_const_shl_sub_bw_minus_1_slt_0(
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 42, [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %sub = sub i32 31, %b
+  %shl = shl i32 42, %sub
+  %cmp = icmp slt i32 %shl, 0
+  ret i1 %cmp
+}
+
+define i1 @test_not_shl_sub_bw_minus_1_slt_0(i32 %a, i32 %b) {
+; CHECK-LABEL: @test_not_shl_sub_bw_minus_1_slt_0(
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[SHL]], -1
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %sub = sub i32 31, %b
+  %shl = shl i32 %a, %sub
+  %cmp = icmp sge i32 %shl, 0
+  ret i1 %cmp
+}
+
+define i1 @test_shl_nuw_sub_bw_minus_1_slt_0(i32 %a, i32 %b) {
+; CHECK-LABEL: @test_shl_nuw_sub_bw_minus_1_slt_0(
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %sub = sub i32 31, %b
+  %shl = shl nuw i32 %a, %sub
+  %cmp = icmp slt i32 %shl, 0
+  ret i1 %cmp
+}
+
+define i1 @test_not_const_shl_sub_bw_minus_1_slt_0(i32 %b) {
+; CHECK-LABEL: @test_not_const_shl_sub_bw_minus_1_slt_0(
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 42, [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[SHL]], -1
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %sub = sub i32 31, %b
+  %shl = shl i32 42, %sub
+  %cmp = icmp sge i32 %shl, 0
+  ret i1 %cmp
+}
+
+define <8 x i1> @test_shl_sub_bw_minus_1_slt_0_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: @test_shl_sub_bw_minus_1_slt_0_v8i8(
+; CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i8> [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i8> [[SHL]], zeroinitializer
+; CHECK-NEXT:    ret <8 x i1> [[CMP]]
+;
+  %sub = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, %b
+  %shl = shl <8 x i8> %a, %sub
+  %cmp = icmp slt <8 x i8> %shl, zeroinitializer
+  ret <8 x i1> %cmp
+}
+
+define <8 x i1> @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat(<8 x i8> %b) {
+; CHECK-LABEL: @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat(
+; CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>, [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i8> [[SHL]], zeroinitializer
+; CHECK-NEXT:    ret <8 x i1> [[CMP]]
+;
+  %sub = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, %b
+  %shl = shl <8 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>, %sub
+  %cmp = icmp slt <8 x i8> %shl, zeroinitializer
+  ret <8 x i1> %cmp
+}
+
+define <8 x i1> @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat_poison_1(<8 x i8> %b) {
+; CHECK-LABEL: @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat_poison_1(
+; CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 poison>, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>, [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i8> [[SHL]], zeroinitializer
+; CHECK-NEXT:    ret <8 x i1> [[CMP]]
+;
+  %sub = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 poison>, %b
+  %shl = shl <8 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>, %sub
+  %cmp = icmp slt <8 x i8> %shl, zeroinitializer
+  ret <8 x i1> %cmp
+}
+
+define <8 x i1> @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat_poison_2(<8 x i8> %b) {
+; CHECK-LABEL: @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat_poison_2(
+; CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 poison>, [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i8> [[SHL]], zeroinitializer
+; CHECK-NEXT:    ret <8 x i1> [[CMP]]
+;
+  %sub = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, %b
+  %shl = shl <8 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 poison>, %sub
+  %cmp = icmp slt <8 x i8> %shl, zeroinitializer
+  ret <8 x i1> %cmp
+}
+
+define <8 x i1> @test_const_shl_sub_bw_minus_1_slt_0_v8i8_nonsplat(<8 x i8> %b) {
+; CHECK-LABEL: @test_const_shl_sub_bw_minus_1_slt_0_v8i8_nonsplat(
+; CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i8> <i8 42, i8 43, i8 44, i8 45, i8 46, i8 47, i8 48, i8 49>, [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i8> [[SHL]], zeroinitializer
+; CHECK-NEXT:    ret <8 x i1> [[CMP]]
+;
+  %sub = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, %b
+  %shl = shl <8 x i8> <i8 42, i8 43, i8 44, i8 45, i8 46, i8 47, i8 48, i8 49>, %sub
+  %cmp = icmp slt <8 x i8> %shl, zeroinitializer
+  ret <8 x i1> %cmp
+}
+
+define i1 @test_shl_sub_non_bw_minus_1_slt_0_negative(i32 %a, i32 %b) {
+; CHECK-LABEL: @test_shl_sub_non_bw_minus_1_slt_0_negative(
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 32, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %sub = sub i32 32, %b
+  %shl = shl i32 %a, %sub
+  %cmp = icmp slt i32 %shl, 0
+  ret i1 %cmp
+}
+
+define i1 @test_shl_sub_bw_minus_1_slt_0_i1_negative(i1 %a, i1 %b) {
+; CHECK-LABEL: @test_shl_sub_bw_minus_1_slt_0_i1_negative(
+; CHECK-NEXT:    ret i1 [[A:%.*]]
+;
+  %sub = sub i1 0, %b
+  %shl = shl i1 %a, %sub
+  %cmp = icmp slt i1 %shl, 0
+  ret i1 %cmp
+}
+
+define i1 @test_shl_sub_bw_minus_1_slt_0_multi_use_sub_negative(i32 %a, i32 %b) {
+; CHECK-LABEL: @test_shl_sub_bw_minus_1_slt_0_multi_use_sub_negative(
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sgt i32 [[SUB]], [[B]]
+; CHECK-NEXT:    [[RET:%.*]] = or i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT:    ret i1 [[RET]]
+;
+  %sub = sub i32 31, %b
+  %shl = shl i32 %a, %sub
+  %cmp1 = icmp slt i32 %shl, 0
+  %cmp2 = icmp slt i32 %b, %sub
+  %ret = or i1 %cmp1, %cmp2
+  ret i1 %ret
+}
+
+define i1 @test_shl_sub_bw_minus_1_slt_0_multi_use_shl_negative(i32 %a, i32 %b) {
+; CHECK-LABEL: @test_shl_sub_bw_minus_1_slt_0_multi_use_shl_negative(
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[SHL]], [[B]]
+; CHECK-NEXT:    [[RET:%.*]] = and i1 [[CMP1]], [[CMP2]]
+; CHECK-NEXT:    ret i1 [[RET]]
+;
+  %sub = sub i32 31, %b
+  %shl = shl i32 %a, %sub
+  %cmp1 = icmp slt i32 %shl, 0
+  %cmp2 = icmp eq i32 %b, %shl
+  %ret = and i1 %cmp1, %cmp2
+  ret i1 %ret
+}

From f7751bcaad241519bb7ef4cc64a4466175bf5f84 Mon Sep 17 00:00:00 2001
From: Marius Kamp <msk at posteo.org>
Date: Mon, 1 Jul 2024 13:38:34 +0200
Subject: [PATCH 2/2] [InstCombine] Canonicalize Bit Testing by Shifting to
 Sign Bit

Implement a new transformation that folds the bit-testing expression
(icmp slt (shl V (sub (bw-1) B)) 0) to (icmp ne (and V (shl 1 B)) 0).
Also fold the logically negated variant, (icmp sgt (shl V (sub (bw-1) B)) -1),
to (icmp eq (and V (shl 1 B)) 0).

Alive proof: https://alive2.llvm.org/ce/z/5ic_qe

Relates to issue #86813.
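
For readers unfamiliar with the idiom, here is a minimal C++ sketch of the
source-level pattern this fold targets; it is illustrative only and not part
of the patch (the helper names are made up):

#include <cstdint>

// Test bit `b` (0 <= b <= 31) of `v` by shifting it into the sign bit.
// This is the shape that survives to IR as (shl V (sub 31 B)) followed by
// (icmp slt ... 0). The signed conversion relies on the usual
// two's-complement behavior (guaranteed since C++20).
bool bitTestViaSignBit(uint32_t v, uint32_t b) {
  return static_cast<int32_t>(v << (31 - b)) < 0;
}

// The canonical form after the fold: build the mask (1 << b), AND it with
// v, and compare the result against zero.
bool bitTestViaMask(uint32_t v, uint32_t b) {
  return (v & (UINT32_C(1) << b)) != 0;
}

With the fold in place, the first form is rewritten into IR equivalent to the
second, as the updated CHECK lines below show.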
---
 .../InstCombine/InstCombineCompares.cpp       | 22 ++++++--
 .../Transforms/InstCombine/icmp-and-shift.ll  | 54 +++++++++----------
 2 files changed, 45 insertions(+), 31 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 3b6df2760ecc24..f352836822b83c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2304,19 +2304,33 @@ Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp,
     if (C.isZero() || (Pred == ICmpInst::ICMP_SGT ? C.isAllOnes() : C.isOne()))
       return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
 
+  unsigned TypeBits = C.getBitWidth();
+  Value *X = Shl->getOperand(0);
+  Type *ShType = Shl->getType();
+
+  // (icmp slt (shl X, (sub bw-1, Y)), 0)  --> (icmp ne (and X, (shl 1, Y)), 0)
+  // (icmp sgt (shl X, (sub bw-1, Y)), -1) --> (icmp eq (and X, (shl 1, Y)), 0)
+  Value *Y;
+  if (Shl->hasOneUse() &&
+      ((Pred == ICmpInst::ICMP_SLT && C.isZero()) ||
+       (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())) &&
+      match(Shl->getOperand(1),
+            m_OneUse(m_Sub(m_SpecificInt(TypeBits - 1), m_Value(Y)))))
+    return new ICmpInst(
+        Pred == ICmpInst::ICMP_SLT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
+        Builder.CreateAnd(X, Builder.CreateShl(ConstantInt::get(ShType, 1), Y,
+                                               "", /*HasNUW=*/true)),
+        ConstantInt::get(ShType, 0));
+
   const APInt *ShiftAmt;
   if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
     return foldICmpShlOne(Cmp, Shl, C);
 
   // Check that the shift amount is in range. If not, don't perform undefined
   // shifts. When the shift is visited, it will be simplified.
-  unsigned TypeBits = C.getBitWidth();
   if (ShiftAmt->uge(TypeBits))
     return nullptr;
 
-  Value *X = Shl->getOperand(0);
-  Type *ShType = Shl->getType();
-
   // NSW guarantees that we are only shifting out sign bits from the high bits,
   // so we can ASHR the compare constant without needing a mask and eliminate
   // the shift.
diff --git a/llvm/test/Transforms/InstCombine/icmp-and-shift.ll b/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
index eca31dc730948c..2a8797883b045c 100644
--- a/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
@@ -609,9 +609,9 @@ define i1 @fold_ne_rhs_fail_shift_not_1s(i8 %x, i8 %yy) {
 
 define i1 @test_shl_sub_bw_minus_1_slt_0(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test_shl_sub_bw_minus_1_slt_0(
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %sub = sub i32 31, %b
@@ -622,9 +622,9 @@ define i1 @test_shl_sub_bw_minus_1_slt_0(i32 %a, i32 %b) {
 
 define i1 @test_const_shl_sub_bw_minus_1_slt_0(i32 %b) {
 ; CHECK-LABEL: @test_const_shl_sub_bw_minus_1_slt_0(
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 42, [[SUB]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 42
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %sub = sub i32 31, %b
@@ -635,9 +635,9 @@ define i1 @test_const_shl_sub_bw_minus_1_slt_0(i32 %b) {
 
 define i1 @test_not_shl_sub_bw_minus_1_slt_0(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test_not_shl_sub_bw_minus_1_slt_0(
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[SHL]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %sub = sub i32 31, %b
@@ -648,9 +648,9 @@ define i1 @test_not_shl_sub_bw_minus_1_slt_0(i32 %a, i32 %b) {
 
 define i1 @test_shl_nuw_sub_bw_minus_1_slt_0(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test_shl_nuw_sub_bw_minus_1_slt_0(
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 [[A:%.*]], [[SUB]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[SHL]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %sub = sub i32 31, %b
@@ -661,9 +661,9 @@ define i1 @test_shl_nuw_sub_bw_minus_1_slt_0(i32 %a, i32 %b) {
 
 define i1 @test_not_const_shl_sub_bw_minus_1_slt_0(i32 %b) {
 ; CHECK-LABEL: @test_not_const_shl_sub_bw_minus_1_slt_0(
-; CHECK-NEXT:    [[SUB:%.*]] = sub i32 31, [[B:%.*]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl i32 42, [[SUB]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[SHL]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 42
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP2]], 0
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %sub = sub i32 31, %b
@@ -674,9 +674,9 @@ define i1 @test_not_const_shl_sub_bw_minus_1_slt_0(i32 %b) {
 
 define <8 x i1> @test_shl_sub_bw_minus_1_slt_0_v8i8(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-LABEL: @test_shl_sub_bw_minus_1_slt_0_v8i8(
-; CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, [[B:%.*]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i8> [[A:%.*]], [[SUB]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i8> [[SHL]], zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <8 x i8> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    ret <8 x i1> [[CMP]]
 ;
   %sub = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, %b
@@ -687,9 +687,9 @@ define <8 x i1> @test_shl_sub_bw_minus_1_slt_0_v8i8(<8 x i8> %a, <8 x i8> %b) {
 
 define <8 x i1> @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat(<8 x i8> %b) {
 ; CHECK-LABEL: @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat(
-; CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, [[B:%.*]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>, [[SUB]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i8> [[SHL]], zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and <8 x i8> [[TMP1]], <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <8 x i8> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    ret <8 x i1> [[CMP]]
 ;
   %sub = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, %b
@@ -713,9 +713,9 @@ define <8 x i1> @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat_poison_1(<8 x i8
 
 define <8 x i1> @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat_poison_2(<8 x i8> %b) {
 ; CHECK-LABEL: @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat_poison_2(
-; CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, [[B:%.*]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i8> <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 poison>, [[SUB]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i8> [[SHL]], zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and <8 x i8> [[TMP1]], <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 poison>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <8 x i8> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    ret <8 x i1> [[CMP]]
 ;
   %sub = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, %b
@@ -726,9 +726,9 @@ define <8 x i1> @test_const_shl_sub_bw_minus_1_slt_0_v8i8_splat_poison_2(<8 x i8
 
 define <8 x i1> @test_const_shl_sub_bw_minus_1_slt_0_v8i8_nonsplat(<8 x i8> %b) {
 ; CHECK-LABEL: @test_const_shl_sub_bw_minus_1_slt_0_v8i8_nonsplat(
-; CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, [[B:%.*]]
-; CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i8> <i8 42, i8 43, i8 44, i8 45, i8 46, i8 47, i8 48, i8 49>, [[SUB]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <8 x i8> [[SHL]], zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = and <8 x i8> [[TMP1]], <i8 42, i8 43, i8 44, i8 45, i8 46, i8 47, i8 48, i8 49>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <8 x i8> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    ret <8 x i1> [[CMP]]
 ;
   %sub = sub <8 x i8> <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>, %b


