[llvm] r337344 - Revert "[InstCombine] Fold 'check for [no] signed truncation' pattern"

Bob Haarman via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jul 17 19:18:28 PDT 2018


Author: inglorion
Date: Tue Jul 17 19:18:28 2018
New Revision: 337344

URL: http://llvm.org/viewvc/llvm-project?rev=337344&view=rev
Log:
Revert "[InstCombine] Fold 'check for [no] signed truncation' pattern"

This reverts r337190, which caused the Chromium build to fail, along
with a few follow-up commits. See
https://bugs.llvm.org/show_bug.cgi?id=38204 and
https://crbug.com/864832 for details.
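
For reference, the reverted fold rewrote a check for lossy signed
truncation,

  ((%x << MaskedBits) a>> MaskedBits) == %x

into an add plus an unsigned compare,

  (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)

where KeptBits = bitwidth(%x) - MaskedBits. Below is a minimal
standalone C++ sketch, illustrative only and not LLVM code (the
constant names merely mirror the removed function), that exhaustively
verifies the equivalence for i8 with MaskedBits = 5:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const unsigned BitWidth = 8, MaskedBits = 5;      // i8, shift by 5
    const unsigned KeptBits = BitWidth - MaskedBits;  // 3
    const uint8_t ICmpCst = uint8_t(1) << KeptBits;   // 1 << KeptBits == 8
    const uint8_t AddCst = ICmpCst >> 1;              // 1 << (KeptBits-1) == 4
    for (unsigned V = 0; V < 256; ++V) {
      // Emulate i8 shl/ashr: the casts truncate back to 8 bits, and
      // right-shifting a sign-extended int8_t is an arithmetic shift.
      uint8_t Shl = uint8_t(uint8_t(V) << MaskedBits);
      int8_t Ashr = int8_t(int8_t(Shl) >> MaskedBits);
      bool Orig = (Ashr == int8_t(V));                      // icmp eq
      bool Folded = uint8_t(uint8_t(V) + AddCst) < ICmpCst; // icmp ult
      if (Orig != Folded) {
        printf("mismatch at x = %d\n", int8_t(V));
        return 1;
      }
    }
    printf("eq fold holds for all 256 i8 values\n");
    return 0;
  }

The icmp ne form of the pattern folds the same way with an unsigned >=
in place of the unsigned <; it is simply the negation of the check
above.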

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
    llvm/trunk/test/Transforms/InstCombine/canonicalize-lack-of-signed-truncation-check.ll
    llvm/trunk/test/Transforms/InstCombine/canonicalize-signed-truncation-check.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp?rev=337344&r1=337343&r2=337344&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp Tue Jul 17 19:18:28 2018
@@ -2945,72 +2945,6 @@ static Value *foldICmpWithLowBitMaskedVa
   return Builder.CreateICmp(DstPred, X, M);
 }
 
-/// Some comparisons can be simplified.
-/// In this case, we are looking for comparisons that look like
-/// a check for a lossy signed truncation.
-/// Folds:   (MaskedBits is a constant.)
-///   ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
-/// Into:
-///   (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
-/// Where  KeptBits = bitwidth(%x) - MaskedBits
-static Value *
-foldICmpWithTruncSignExtendedVal(ICmpInst &I,
-                                 InstCombiner::BuilderTy &Builder) {
-  ICmpInst::Predicate SrcPred;
-  Value *X;
-  const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
-  // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
-  if (!match(&I, m_c_ICmp(SrcPred,
-                          m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
-                                          m_APInt(C1))),
-                          m_Deferred(X))))
-    return nullptr;
-
-  // Potential handling of non-splats: for each element:
-  //  * if both are undef, replace with constant 0.
-  //    Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
-  //  * if both are not undef and are different, bail out.
-  //  * else, only one is undef, then pick the non-undef one.
-
-  // The shift amount must be equal.
-  if (*C0 != *C1)
-    return nullptr;
-  const uint64_t MaskedBits = C0->getZExtValue();
-  assert(MaskedBits && "shift of %x by zero should be folded to %x already.");
-
-  ICmpInst::Predicate DstPred;
-  switch (SrcPred) {
-  case ICmpInst::Predicate::ICMP_EQ:
-    // ((%x << MaskedBits) a>> MaskedBits) == %x
-    //   =>
-    // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
-    DstPred = ICmpInst::Predicate::ICMP_ULT;
-    break;
-  case ICmpInst::Predicate::ICMP_NE:
-    // ((%x << MaskedBits) a>> MaskedBits) != %x
-    //   =>
-    // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
-    DstPred = ICmpInst::Predicate::ICMP_UGE;
-    break;
-  // FIXME: are more folds possible?
-  default:
-    return nullptr;
-  }
-
-  const uint64_t XBitWidth = C0->getBitWidth();
-  const uint64_t KeptBits = XBitWidth - MaskedBits;
-  const uint64_t ICmpCst = (uint64_t)1 << KeptBits; // (1 << KeptBits)
-  const uint64_t AddCst = ICmpCst >> 1;   // (1 << (KeptBits-1))
-
-  auto *XType = X->getType();
-  // (add %x, (1 << (KeptBits-1)))
-  Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
-  // (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
-  Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
-
-  return T1;
-}
-
 /// Try to fold icmp (binop), X or icmp X, (binop).
 /// TODO: A large part of this logic is duplicated in InstSimplify's
 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
@@ -3351,9 +3285,6 @@ Instruction *InstCombiner::foldICmpBinOp
   if (Value *V = foldICmpWithLowBitMaskedVal(I, Builder))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
-    return replaceInstUsesWith(I, V);
-
   return nullptr;
 }
 

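A quick hand-check of the pre-revert expectations in the test diffs
below: with MaskedBits = 5 on i8, KeptBits = 8 - 5 = 3, so the fold
emitted AddCst = 1 << 2 = 4 and ICmpCst = 1 << 3 = 8. That is exactly
the add of 4 and the unsigned compare against 8 that the old CHECK
lines contained and that this revert removes.
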
Modified: llvm/trunk/test/Transforms/InstCombine/canonicalize-lack-of-signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/canonicalize-lack-of-signed-truncation-check.ll?rev=337344&r1=337343&r2=337344&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/canonicalize-lack-of-signed-truncation-check.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/canonicalize-lack-of-signed-truncation-check.ll Tue Jul 17 19:18:28 2018
@@ -15,12 +15,13 @@
 
 define i1 @p0(i8 %x) {
 ; CHECK-LABEL: @p0(
-; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 4
   %tmp2 = icmp eq i8 %tmp1, %x
   ret i1 %tmp2
 }
@@ -31,64 +32,65 @@ define i1 @p0(i8 %x) {
 
 define <2 x i1> @p1_vec_splat(<2 x i8> %x) {
 ; CHECK-LABEL: @p1_vec_splat(
-; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], <i8 4, i8 4>
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult <2 x i8> [[TMP1]], <i8 8, i8 8>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 4, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 4, i8 4>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <2 x i8> %x, <i8 5, i8 5>
-  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 5>
+  %tmp0 = shl <2 x i8> %x, <i8 4, i8 4>
+  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 4, i8 4>
   %tmp2 = icmp eq <2 x i8> %tmp1, %x
   ret <2 x i1> %tmp2
 }
 
 define <2 x i1> @p2_vec_nonsplat(<2 x i8> %x) {
 ; CHECK-LABEL: @p2_vec_nonsplat(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 6>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 6>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 4, i8 5>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 4, i8 5>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <2 x i8> %x, <i8 5, i8 6>
-  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 6>
+  %tmp0 = shl <2 x i8> %x, <i8 4, i8 5>
+  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 4, i8 5>
   %tmp2 = icmp eq <2 x i8> %tmp1, %x
   ret <2 x i1> %tmp2
 }
 
 define <3 x i1> @p3_vec_undef0(<3 x i8> %x) {
 ; CHECK-LABEL: @p3_vec_undef0(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 undef, i8 5>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 5, i8 5>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 4, i8 undef, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 4, i8 4, i8 4>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <3 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <3 x i8> %x, <i8 5, i8 undef, i8 5>
-  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 5, i8 5>
+  %tmp0 = shl <3 x i8> %x, <i8 4, i8 undef, i8 4>
+  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 4, i8 4, i8 4>
   %tmp2 = icmp eq <3 x i8> %tmp1, %x
   ret <3 x i1> %tmp2
 }
 
 define <3 x i1> @p4_vec_undef1(<3 x i8> %x) {
 ; CHECK-LABEL: @p4_vec_undef1(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 5, i8 5>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 undef, i8 5>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 4, i8 4, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 4, i8 undef, i8 4>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <3 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <3 x i8> %x, <i8 5, i8 5, i8 5>
-  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 undef, i8 5>
+  %tmp0 = shl <3 x i8> %x, <i8 4, i8 4, i8 4>
+  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 4, i8 undef, i8 4>
   %tmp2 = icmp eq <3 x i8> %tmp1, %x
   ret <3 x i1> %tmp2
 }
 
 define <3 x i1> @p5_vec_undef2(<3 x i8> %x) {
 ; CHECK-LABEL: @p5_vec_undef2(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 undef, i8 5>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 undef, i8 5>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 4, i8 undef, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 4, i8 undef, i8 4>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <3 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <3 x i8> %x, <i8 5, i8 undef, i8 5>
-  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 undef, i8 5>
+  %tmp0 = shl <3 x i8> %x, <i8 4, i8 undef, i8 4>
+  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 4, i8 undef, i8 4>
   %tmp2 = icmp eq <3 x i8> %tmp1, %x
   ret <3 x i1> %tmp2
 }
@@ -102,13 +104,14 @@ declare i8 @gen8()
 define i1 @c0() {
 ; CHECK-LABEL: @c0(
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[X]], [[TMP1]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %x = call i8 @gen8()
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 4
   %tmp2 = icmp eq i8 %x, %tmp1 ; swapped order
   ret i1 %tmp2
 }
@@ -121,29 +124,29 @@ declare void @use8(i8)
 
 define i1 @n_oneuse0(i8 %x) {
 ; CHECK-LABEL: @n_oneuse0(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
 ; CHECK-NEXT:    call void @use8(i8 [[TMP0]])
-; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
+  %tmp0 = shl i8 %x, 4
   call void @use8(i8 %tmp0)
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp1 = ashr exact i8 %tmp0, 4
   %tmp2 = icmp eq i8 %tmp1, %x
   ret i1 %tmp2
 }
 
 define i1 @n_oneuse1(i8 %x) {
 ; CHECK-LABEL: @n_oneuse1(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
 ; CHECK-NEXT:    call void @use8(i8 [[TMP1]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 4
   call void @use8(i8 %tmp1)
   %tmp2 = icmp eq i8 %tmp1, %x
   ret i1 %tmp2
@@ -151,16 +154,16 @@ define i1 @n_oneuse1(i8 %x) {
 
 define i1 @n_oneuse2(i8 %x) {
 ; CHECK-LABEL: @n_oneuse2(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
 ; CHECK-NEXT:    call void @use8(i8 [[TMP0]])
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
 ; CHECK-NEXT:    call void @use8(i8 [[TMP1]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
+  %tmp0 = shl i8 %x, 4
   call void @use8(i8 %tmp0)
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp1 = ashr exact i8 %tmp0, 4
   call void @use8(i8 %tmp1)
   %tmp2 = icmp eq i8 %tmp1, %x
   ret i1 %tmp2
@@ -172,50 +175,50 @@ define i1 @n_oneuse2(i8 %x) {
 
 define i1 @n0(i8 %x) {
 ; CHECK-LABEL: @n0(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 3
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 3 ; not 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 3 ; not 4
   %tmp2 = icmp eq i8 %tmp1, %x
   ret i1 %tmp2
 }
 
 define i1 @n1(i8 %x) {
 ; CHECK-LABEL: @n1(
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[X:%.*]], 8
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[X:%.*]], 16
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = lshr exact i8 %tmp0, 5 ; not ashr
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = lshr exact i8 %tmp0, 4 ; not ashr
   %tmp2 = icmp eq i8 %tmp1, %x
   ret i1 %tmp2
 }
 
 define i1 @n2(i8 %x, i8 %y) {
 ; CHECK-LABEL: @n2(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 4
   %tmp2 = icmp eq i8 %tmp1, %y ; not %x
   ret i1 %tmp2
 }
 
 define <2 x i1> @n3_vec_nonsplat(<2 x i8> %x) {
 ; CHECK-LABEL: @n3_vec_nonsplat(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 5>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 3>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 4, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 4, i8 3>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <2 x i8> %x, <i8 5, i8 5>
-  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 3> ; 3 instead of 5
+  %tmp0 = shl <2 x i8> %x, <i8 4, i8 4>
+  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 4, i8 3> ; 3 instead of 4
   %tmp2 = icmp eq <2 x i8> %tmp1, %x
   ret <2 x i1> %tmp2
 }
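
One detail in the icmp ne file that follows: the removed expectations
check for an unsigned > 7 rather than an unsigned >= 8 because
InstCombine canonicalizes unsigned compares against a constant to the
strict form, so the fold's u>= (1 << KeptBits) surfaced as ugt 7 in
the actual output.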

Modified: llvm/trunk/test/Transforms/InstCombine/canonicalize-signed-truncation-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/canonicalize-signed-truncation-check.ll?rev=337344&r1=337343&r2=337344&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/canonicalize-signed-truncation-check.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/canonicalize-signed-truncation-check.ll Tue Jul 17 19:18:28 2018
@@ -15,12 +15,13 @@
 
 define i1 @p0(i8 %x) {
 ; CHECK-LABEL: @p0(
-; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i8 [[TMP1]], 7
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 4
   %tmp2 = icmp ne i8 %tmp1, %x
   ret i1 %tmp2
 }
@@ -31,64 +32,65 @@ define i1 @p0(i8 %x) {
 
 define <2 x i1> @p1_vec_splat(<2 x i8> %x) {
 ; CHECK-LABEL: @p1_vec_splat(
-; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], <i8 4, i8 4>
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt <2 x i8> [[TMP1]], <i8 7, i8 7>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 4, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 4, i8 4>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <2 x i8> %x, <i8 5, i8 5>
-  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 5>
+  %tmp0 = shl <2 x i8> %x, <i8 4, i8 4>
+  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 4, i8 4>
   %tmp2 = icmp ne <2 x i8> %tmp1, %x
   ret <2 x i1> %tmp2
 }
 
 define <2 x i1> @p2_vec_nonsplat(<2 x i8> %x) {
 ; CHECK-LABEL: @p2_vec_nonsplat(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 6>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 6>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 4, i8 5>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 4, i8 5>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <2 x i8> %x, <i8 5, i8 6>
-  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 6>
+  %tmp0 = shl <2 x i8> %x, <i8 4, i8 5>
+  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 4, i8 5>
   %tmp2 = icmp ne <2 x i8> %tmp1, %x
   ret <2 x i1> %tmp2
 }
 
 define <3 x i1> @p3_vec_undef0(<3 x i8> %x) {
 ; CHECK-LABEL: @p3_vec_undef0(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 undef, i8 5>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 5, i8 5>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 4, i8 undef, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 4, i8 4, i8 4>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <3 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <3 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <3 x i8> %x, <i8 5, i8 undef, i8 5>
-  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 5, i8 5>
+  %tmp0 = shl <3 x i8> %x, <i8 4, i8 undef, i8 4>
+  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 4, i8 4, i8 4>
   %tmp2 = icmp ne <3 x i8> %tmp1, %x
   ret <3 x i1> %tmp2
 }
 
 define <3 x i1> @p4_vec_undef1(<3 x i8> %x) {
 ; CHECK-LABEL: @p4_vec_undef1(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 5, i8 5>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 undef, i8 5>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 4, i8 4, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 4, i8 undef, i8 4>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <3 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <3 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <3 x i8> %x, <i8 5, i8 5, i8 5>
-  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 undef, i8 5>
+  %tmp0 = shl <3 x i8> %x, <i8 4, i8 4, i8 4>
+  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 4, i8 undef, i8 4>
   %tmp2 = icmp ne <3 x i8> %tmp1, %x
   ret <3 x i1> %tmp2
 }
 
 define <3 x i1> @p5_vec_undef2(<3 x i8> %x) {
 ; CHECK-LABEL: @p5_vec_undef2(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 undef, i8 5>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 undef, i8 5>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 4, i8 undef, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 4, i8 undef, i8 4>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <3 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <3 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <3 x i8> %x, <i8 5, i8 undef, i8 5>
-  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 undef, i8 5>
+  %tmp0 = shl <3 x i8> %x, <i8 4, i8 undef, i8 4>
+  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 4, i8 undef, i8 4>
   %tmp2 = icmp ne <3 x i8> %tmp1, %x
   ret <3 x i1> %tmp2
 }
@@ -102,13 +104,14 @@ declare i8 @gen8()
 define i1 @c0() {
 ; CHECK-LABEL: @c0(
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i8 [[TMP1]], 7
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[X]], [[TMP1]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %x = call i8 @gen8()
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 4
   %tmp2 = icmp ne i8 %x, %tmp1 ; swapped order
   ret i1 %tmp2
 }
@@ -121,29 +124,29 @@ declare void @use8(i8)
 
 define i1 @n_oneuse0(i8 %x) {
 ; CHECK-LABEL: @n_oneuse0(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
 ; CHECK-NEXT:    call void @use8(i8 [[TMP0]])
-; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X]], 4
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i8 [[TMP1]], 7
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
+  %tmp0 = shl i8 %x, 4
   call void @use8(i8 %tmp0)
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp1 = ashr exact i8 %tmp0, 4
   %tmp2 = icmp ne i8 %tmp1, %x
   ret i1 %tmp2
 }
 
 define i1 @n_oneuse1(i8 %x) {
 ; CHECK-LABEL: @n_oneuse1(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
 ; CHECK-NEXT:    call void @use8(i8 [[TMP1]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 4
   call void @use8(i8 %tmp1)
   %tmp2 = icmp ne i8 %tmp1, %x
   ret i1 %tmp2
@@ -151,16 +154,16 @@ define i1 @n_oneuse1(i8 %x) {
 
 define i1 @n_oneuse2(i8 %x) {
 ; CHECK-LABEL: @n_oneuse2(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
 ; CHECK-NEXT:    call void @use8(i8 [[TMP0]])
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
 ; CHECK-NEXT:    call void @use8(i8 [[TMP1]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
+  %tmp0 = shl i8 %x, 4
   call void @use8(i8 %tmp0)
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp1 = ashr exact i8 %tmp0, 4
   call void @use8(i8 %tmp1)
   %tmp2 = icmp ne i8 %tmp1, %x
   ret i1 %tmp2
@@ -172,50 +175,50 @@ define i1 @n_oneuse2(i8 %x) {
 
 define i1 @n0(i8 %x) {
 ; CHECK-LABEL: @n0(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 3
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 3 ; not 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 3 ; not 4
   %tmp2 = icmp ne i8 %tmp1, %x
   ret i1 %tmp2
 }
 
 define i1 @n1(i8 %x) {
 ; CHECK-LABEL: @n1(
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[X:%.*]], 15
 ; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = lshr exact i8 %tmp0, 5 ; not ashr
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = lshr exact i8 %tmp0, 4 ; not ashr
   %tmp2 = icmp ne i8 %tmp1, %x
   ret i1 %tmp2
 }
 
 define i1 @n2(i8 %x, i8 %y) {
 ; CHECK-LABEL: @n2(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
+; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i8 [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
-  %tmp0 = shl i8 %x, 5
-  %tmp1 = ashr exact i8 %tmp0, 5
+  %tmp0 = shl i8 %x, 4
+  %tmp1 = ashr exact i8 %tmp0, 4
   %tmp2 = icmp ne i8 %tmp1, %y ; not %x
   ret i1 %tmp2
 }
 
 define <2 x i1> @n3_vec_nonsplat(<2 x i8> %x) {
 ; CHECK-LABEL: @n3_vec_nonsplat(
-; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 5>
-; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 3>
+; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 4, i8 4>
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 4, i8 3>
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i8> [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
 ;
-  %tmp0 = shl <2 x i8> %x, <i8 5, i8 5>
-  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 3> ; 3 instead of 5
+  %tmp0 = shl <2 x i8> %x, <i8 4, i8 4>
+  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 4, i8 3> ; 3 instead of 4
   %tmp2 = icmp ne <2 x i8> %tmp1, %x
   ret <2 x i1> %tmp2
 }
