[llvm] [InstCombine] Add transforms for `icmp uPred (trunc x),(truncOrZext(y))` -> `icmp uPred x,y` (PR #71309)

via llvm-commits <llvm-commits at lists.llvm.org>
Fri Nov 17 20:54:00 PST 2023


https://github.com/goldsteinn updated https://github.com/llvm/llvm-project/pull/71309

From 81a0e62c02b47eba9e35ae66c570d288f5c497ac Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n at gmail.com>
Date: Sun, 22 Oct 2023 01:54:16 -0500
Subject: [PATCH 1/2] [InstCombine] Add tests for transforming `(icmp eq/ne
 trunc(x), truncOrZext(y))`; NFC

---
 .../InstCombine/icmp-of-trunc-ext.ll          | 280 ++++++++++++++++++
 1 file changed, 280 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll

diff --git a/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
new file mode 100644
index 000000000000000..7cce1320e7b4b97
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
@@ -0,0 +1,280 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+declare void @llvm.assume(i1)
+declare void @use(i16)
+define i1 @icmp_trunc_x_trunc_y(i32 %x, i32 %y) {
+; CHECK-LABEL: @icmp_trunc_x_trunc_y(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i32 %x, 65536
+  %y_lb_only = icmp ult i32 %y, 65536
+  call void @llvm.assume(i1 %x_lb_only)
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = trunc i32 %x to i16
+  %y16 = trunc i32 %y to i16
+  %r = icmp eq i16 %x16, %y16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_trunc_y_fail_from_illegal1(i256 %x, i256 %y) {
+; CHECK-LABEL: @icmp_trunc_x_trunc_y_fail_from_illegal1(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i256 [[X:%.*]], 65536
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i256 [[Y:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i256 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i256 [[Y]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i256 %x, 65536
+  %y_lb_only = icmp ult i256 %y, 65536
+  call void @llvm.assume(i1 %x_lb_only)
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = trunc i256 %x to i16
+  %y16 = trunc i256 %y to i16
+  %r = icmp eq i16 %x16, %y16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_trunc_y_illegal_trunc_to_legal_anyways(i123 %x, i32 %y) {
+; CHECK-LABEL: @icmp_trunc_x_trunc_y_illegal_trunc_to_legal_anyways(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i123 [[X:%.*]], 65536
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i123 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i123 %x, 65536
+  %y_lb_only = icmp ult i32 %y, 65536
+  call void @llvm.assume(i1 %x_lb_only)
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = trunc i123 %x to i16
+  %y16 = trunc i32 %y to i16
+  %r = icmp eq i16 %x16, %y16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_trunc_y_2_illegal_anyways(i33 %x, i63 %y) {
+; CHECK-LABEL: @icmp_trunc_x_trunc_y_2_illegal_anyways(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i33 [[X:%.*]], 512
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i63 [[Y:%.*]], 512
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i33 [[X]] to i9
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i63 [[Y]] to i9
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i9 [[Y16]], [[X16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i33 %x, 512
+  %y_lb_only = icmp ult i63 %y, 512
+  call void @llvm.assume(i1 %x_lb_only)
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = trunc i33 %x to i9
+  %y16 = trunc i63 %y to i9
+  %r = icmp ult i9 %y16, %x16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_trunc_y_3(i64 %x, i32 %y) {
+; CHECK-LABEL: @icmp_trunc_x_trunc_y_3(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i64 [[X:%.*]], 123
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 256
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[XI8:%.*]] = trunc i64 [[X]] to i8
+; CHECK-NEXT:    [[YI8:%.*]] = trunc i32 [[Y]] to i8
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[YI8]], [[XI8]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i64 %x, 123
+  %y_lb_only = icmp ult i32 %y, 256
+  call void @llvm.assume(i1 %x_lb_only)
+  call void @llvm.assume(i1 %y_lb_only)
+  %xi8 = trunc i64 %x to i8
+  %yi8 = trunc i32 %y to i8
+  %r = icmp ule i8 %yi8, %xi8
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_trunc_y_fail_maybe_dirty_upper(i32 %x, i32 %y) {
+; CHECK-LABEL: @icmp_trunc_x_trunc_y_fail_maybe_dirty_upper(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65537
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i32 %x, 65536
+  %y_lb_only = icmp ult i32 %y, 65537
+  call void @llvm.assume(i1 %x_lb_only)
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = trunc i32 %x to i16
+  %y16 = trunc i32 %y to i16
+  %r = icmp ne i16 %x16, %y16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_trunc_y_fail_maybe_dirty_upper_2(i32 %x, i32 %y) {
+; CHECK-LABEL: @icmp_trunc_x_trunc_y_fail_maybe_dirty_upper_2(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp slt i32 [[X:%.*]], 65536
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp slt i32 %x, 65536
+  %y_lb_only = icmp ult i32 %y, 65536
+  call void @llvm.assume(i1 %x_lb_only)
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = trunc i32 %x to i16
+  %y16 = trunc i32 %y to i16
+  %r = icmp ne i16 %x16, %y16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_trunc_y_swap0(i33 %x, i32 %y) {
+; CHECK-LABEL: @icmp_trunc_x_trunc_y_swap0(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i33 [[X:%.*]], 65536
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i33 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i33 %x, 65536
+  %y_lb_only = icmp ult i32 %y, 65536
+  call void @llvm.assume(i1 %x_lb_only)
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = trunc i33 %x to i16
+  %y16 = trunc i32 %y to i16
+  %r = icmp ule i16 %x16, %y16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_trunc_y_swap1(i33 %x, i32 %y) {
+; CHECK-LABEL: @icmp_trunc_x_trunc_y_swap1(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i33 [[X:%.*]], 65536
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i33 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i16 [[Y16]], [[X16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i33 %x, 65536
+  %y_lb_only = icmp ult i32 %y, 65536
+  call void @llvm.assume(i1 %x_lb_only)
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = trunc i33 %x to i16
+  %y16 = trunc i32 %y to i16
+  %r = icmp ule i16 %y16, %x16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_zext_y(i32 %x, i8 %y) {
+; CHECK-LABEL: @icmp_trunc_x_zext_y(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = zext i8 [[Y:%.*]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i32 %x, 65536
+  call void @llvm.assume(i1 %x_lb_only)
+  %x16 = trunc i32 %x to i16
+  %y16 = zext i8 %y to i16
+  %r = icmp ugt i16 %x16, %y16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_zext_y_2(i32 %x, i8 %y) {
+; CHECK-LABEL: @icmp_trunc_x_zext_y_2(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = zext i8 [[Y:%.*]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i16 [[Y16]], [[X16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i32 %x, 65536
+  call void @llvm.assume(i1 %x_lb_only)
+  %x16 = trunc i32 %x to i16
+  %y16 = zext i8 %y to i16
+  %r = icmp uge i16 %y16, %x16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_zext_y_3(i6 %x, i32 %y) {
+; CHECK-LABEL: @icmp_trunc_x_zext_y_3(
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = zext i6 [[X:%.*]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[Y16]], [[X16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %y_lb_only = icmp ult i32 %y, 65536
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = zext i6 %x to i16
+  %y16 = trunc i32 %y to i16
+  %r = icmp ne i16 %y16, %x16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_zext_y_3_fail_illegal(i6 %x, i45 %y) {
+; CHECK-LABEL: @icmp_trunc_x_zext_y_3_fail_illegal(
+; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i45 [[Y:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = zext i6 [[X:%.*]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc i45 [[Y]] to i16
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[Y16]], [[X16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %y_lb_only = icmp ult i45 %y, 65536
+  call void @llvm.assume(i1 %y_lb_only)
+  %x16 = zext i6 %x to i16
+  %y16 = trunc i45 %y to i16
+  %r = icmp ne i16 %y16, %x16
+  ret i1 %r
+}
+
+define i1 @icmp_trunc_x_zext_y_fail_multiuse(i32 %x, i8 %y) {
+; CHECK-LABEL: @icmp_trunc_x_zext_y_fail_multiuse(
+; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
+; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
+; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = zext i8 [[Y:%.*]] to i16
+; CHECK-NEXT:    call void @use(i16 [[Y16]])
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x_lb_only = icmp ult i32 %x, 65536
+  call void @llvm.assume(i1 %x_lb_only)
+  %x16 = trunc i32 %x to i16
+  %y16 = zext i8 %y to i16
+  call void @use(i16 %y16)
+  %r = icmp ule i16 %x16, %y16
+  ret i1 %r
+}

From ac60c783670648710d3985282de3edde07dcf8ff Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n at gmail.com>
Date: Sun, 22 Oct 2023 01:22:20 -0500
Subject: [PATCH 2/2] [InstCombine] Add transforms for `(icmp uPred (trunc
 x),(truncOrZext(y)))`->`(icmp uPred x,y)`

Alive2 proofs for the three transforms (all commutative):
https://alive2.llvm.org/ce/z/Bc-nh4
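
As an illustrative summary (taken from the first test in
icmp-of-trunc-ext.ll below, not an additional change): when known bits
prove that both operands fit in the truncated width, the compare is
rewritten on the wider original values:

  ; before (upper 16 bits of %x and %y known zero via llvm.assume)
  %x16 = trunc i32 %x to i16
  %y16 = trunc i32 %y to i16
  %r = icmp eq i16 %x16, %y16

  ; after
  %r = icmp eq i32 %x, %y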
---
 .../InstCombine/InstCombineCompares.cpp       | 58 +++++++++++++++++++
 .../InstCombine/InstCombineInternal.h         |  2 +
 .../Transforms/InstCombine/eq-of-parts.ll     | 26 ++-------
 .../InstCombine/icmp-of-trunc-ext.ll          | 44 ++++++--------
 4 files changed, 81 insertions(+), 49 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 9bc84c7dd6e1539..783668db2da7670 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1512,6 +1512,61 @@ Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
   return nullptr;
 }
 
+/// Fold icmp (trunc X), (trunc Y).
+/// Fold icmp (trunc X), (zext Y).
+Instruction *
+InstCombinerImpl::foldICmpTruncWithTruncOrExt(ICmpInst &Cmp,
+                                              const SimplifyQuery &Q) {
+  if (Cmp.isSigned())
+    return nullptr;
+
+  Value *X, *Y;
+  ICmpInst::Predicate Pred;
+  bool YIsZext = false;
+  // Try to match icmp (trunc X), (trunc Y)
+  if (match(&Cmp, m_ICmp(Pred, m_Trunc(m_Value(X)), m_Trunc(m_Value(Y))))) {
+    if (X->getType() != Y->getType() &&
+        (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
+      return nullptr;
+    if (!isDesirableIntType(X->getType()->getScalarSizeInBits()) &&
+        isDesirableIntType(Y->getType()->getScalarSizeInBits())) {
+      std::swap(X, Y);
+      Pred = Cmp.getSwappedPredicate(Pred);
+    }
+  }
+  // Try to match icmp (trunc X), (zext Y)
+  else if (match(&Cmp, m_c_ICmp(Pred, m_Trunc(m_Value(X)),
+                                m_OneUse(m_ZExt(m_Value(Y))))))
+    YIsZext = true;
+  else
+    return nullptr;
+
+  Type *TruncTy = Cmp.getOperand(0)->getType();
+  unsigned TruncBits = TruncTy->getScalarSizeInBits();
+
+  // If this transform would end up changing from a desirable type to an
+  // undesirable one, skip it.
+  if (isDesirableIntType(TruncBits) &&
+      !isDesirableIntType(X->getType()->getScalarSizeInBits()))
+    return nullptr;
+
+  // Check if the trunc is unneeded.
+  KnownBits KnownX = llvm::computeKnownBits(X, /*Depth*/ 0, Q);
+  if (KnownX.countMaxActiveBits() > TruncBits)
+    return nullptr;
+
+  if (!YIsZext) {
+    // If Y is also a trunc, make sure it is unneeded.
+    KnownBits KnownY = llvm::computeKnownBits(Y, /*Depth*/ 0, Q);
+    if (KnownY.countMaxActiveBits() > TruncBits)
+      return nullptr;
+  }
+
+  Value *NewY = Builder.CreateZExtOrTrunc(Y, X->getType());
+  return new ICmpInst(Pred, X, NewY);
+}
+
 /// Fold icmp (xor X, Y), C.
 Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp,
                                                    BinaryOperator *Xor,
@@ -6879,6 +6934,9 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
   if (Instruction *Res = foldICmpUsingKnownBits(I))
     return Res;
 
+  if (Instruction *Res = foldICmpTruncWithTruncOrExt(I, Q))
+    return Res;
+
   // Test if the ICmpInst instruction is used exclusively by a select as
   // part of a minimum or maximum operation. If so, refrain from doing
   // any other folding. This helps out other analyses which understand
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 68a8fb676d8d909..0bbb22be71569f6 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -647,6 +647,8 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
                                       ConstantInt *C);
   Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                      const APInt &C);
+  Instruction *foldICmpTruncWithTruncOrExt(ICmpInst &Cmp,
+                                           const SimplifyQuery &Q);
   Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                    const APInt &C);
   Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
diff --git a/llvm/test/Transforms/InstCombine/eq-of-parts.ll b/llvm/test/Transforms/InstCombine/eq-of-parts.ll
index 57b15ae3b96e66e..217e37b85933949 100644
--- a/llvm/test/Transforms/InstCombine/eq-of-parts.ll
+++ b/llvm/test/Transforms/InstCombine/eq-of-parts.ll
@@ -584,17 +584,8 @@ define i1 @eq_21_not_adjacent(i32 %x, i32 %y) {
 
 define i1 @eq_shift_in_zeros(i32 %x, i32 %y) {
 ; CHECK-LABEL: @eq_shift_in_zeros(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i24
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i24
-; CHECK-NEXT:    [[C_1:%.*]] = icmp eq i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp eq i24 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = and i1 [[C_2]], [[C_1]]
+; CHECK-NEXT:    [[C_210_UNSHIFTED:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[C_210:%.*]] = icmp ult i32 [[C_210_UNSHIFTED]], 256
 ; CHECK-NEXT:    ret i1 [[C_210]]
 ;
   %x.321 = lshr i32 %x, 8
@@ -1249,17 +1240,8 @@ define i1 @ne_21_not_adjacent(i32 %x, i32 %y) {
 
 define i1 @ne_shift_in_zeros(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ne_shift_in_zeros(
-; CHECK-NEXT:    [[X_321:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[X_1:%.*]] = trunc i32 [[X_321]] to i8
-; CHECK-NEXT:    [[X_32:%.*]] = lshr i32 [[X]], 16
-; CHECK-NEXT:    [[X_2:%.*]] = trunc i32 [[X_32]] to i24
-; CHECK-NEXT:    [[Y_321:%.*]] = lshr i32 [[Y:%.*]], 8
-; CHECK-NEXT:    [[Y_1:%.*]] = trunc i32 [[Y_321]] to i8
-; CHECK-NEXT:    [[Y_32:%.*]] = lshr i32 [[Y]], 16
-; CHECK-NEXT:    [[Y_2:%.*]] = trunc i32 [[Y_32]] to i24
-; CHECK-NEXT:    [[C_1:%.*]] = icmp ne i8 [[X_1]], [[Y_1]]
-; CHECK-NEXT:    [[C_2:%.*]] = icmp ne i24 [[X_2]], [[Y_2]]
-; CHECK-NEXT:    [[C_210:%.*]] = or i1 [[C_2]], [[C_1]]
+; CHECK-NEXT:    [[C_210_UNSHIFTED:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[C_210:%.*]] = icmp ugt i32 [[C_210_UNSHIFTED]], 255
 ; CHECK-NEXT:    ret i1 [[C_210]]
 ;
   %x.321 = lshr i32 %x, 8
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
index 7cce1320e7b4b97..85f67bfa335bb87 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
@@ -9,9 +9,7 @@ define i1 @icmp_trunc_x_trunc_y(i32 %x, i32 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[X]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %x_lb_only = icmp ult i32 %x, 65536
@@ -51,9 +49,8 @@ define i1 @icmp_trunc_x_trunc_y_illegal_trunc_to_legal_anyways(i123 %x, i32 %y)
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i123 [[X]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i123 [[X]] to i32
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %x_lb_only = icmp ult i123 %x, 65536
@@ -72,9 +69,8 @@ define i1 @icmp_trunc_x_trunc_y_2_illegal_anyways(i33 %x, i63 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i63 [[Y:%.*]], 512
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i33 [[X]] to i9
-; CHECK-NEXT:    [[Y16:%.*]] = trunc i63 [[Y]] to i9
-; CHECK-NEXT:    [[R:%.*]] = icmp ult i9 [[Y16]], [[X16]]
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i33 [[X]] to i63
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt i63 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %x_lb_only = icmp ult i33 %x, 512
@@ -93,9 +89,8 @@ define i1 @icmp_trunc_x_trunc_y_3(i64 %x, i32 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 256
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[XI8:%.*]] = trunc i64 [[X]] to i8
-; CHECK-NEXT:    [[YI8:%.*]] = trunc i32 [[Y]] to i8
-; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[YI8]], [[XI8]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i32 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %x_lb_only = icmp ult i64 %x, 123
@@ -156,9 +151,8 @@ define i1 @icmp_trunc_x_trunc_y_swap0(i33 %x, i32 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i33 [[X]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
-; CHECK-NEXT:    [[R:%.*]] = icmp ule i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i33 [[X]] to i32
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i32 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %x_lb_only = icmp ult i33 %x, 65536
@@ -177,9 +171,8 @@ define i1 @icmp_trunc_x_trunc_y_swap1(i33 %x, i32 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i33 [[X]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
-; CHECK-NEXT:    [[R:%.*]] = icmp ule i16 [[Y16]], [[X16]]
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i33 [[X]] to i32
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i32 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %x_lb_only = icmp ult i33 %x, 65536
@@ -196,9 +189,8 @@ define i1 @icmp_trunc_x_zext_y(i32 %x, i8 %y) {
 ; CHECK-LABEL: @icmp_trunc_x_zext_y(
 ; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = zext i8 [[Y:%.*]] to i16
-; CHECK-NEXT:    [[R:%.*]] = icmp ugt i16 [[X16]], [[Y16]]
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %x_lb_only = icmp ult i32 %x, 65536
@@ -213,9 +205,8 @@ define i1 @icmp_trunc_x_zext_y_2(i32 %x, i8 %y) {
 ; CHECK-LABEL: @icmp_trunc_x_zext_y_2(
 ; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = zext i8 [[Y:%.*]] to i16
-; CHECK-NEXT:    [[R:%.*]] = icmp uge i16 [[Y16]], [[X16]]
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %x_lb_only = icmp ult i32 %x, 65536
@@ -230,9 +221,8 @@ define i1 @icmp_trunc_x_zext_y_3(i6 %x, i32 %y) {
 ; CHECK-LABEL: @icmp_trunc_x_zext_y_3(
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = zext i6 [[X:%.*]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
-; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[Y16]], [[X16]]
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i6 [[X:%.*]] to i32
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %y_lb_only = icmp ult i32 %y, 65536


