[llvm] [InstCombine] Add transforms `(icmp spred (and X, Y), X)` if `X` or `Y` are known signed/unsigned (PR #94417)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 5 13:15:59 PDT 2024
https://github.com/goldsteinn updated https://github.com/llvm/llvm-project/pull/94417
From b05dce7b9191a3652fb5907aa0bfbd61caae9ebe Mon Sep 17 00:00:00 2001
From: Noah Goldstein <goldstein.w.n at gmail.com>
Date: Tue, 4 Jun 2024 12:14:46 -0500
Subject: [PATCH] [InstCombine] Add transforms `(icmp spred (and X, Y), X)` if
`X` or `Y` are known signed/unsigned
Several transforms (an illustrative IR sketch follows this list):
1) If known `Y < 0`:
- slt -> ult: https://alive2.llvm.org/ce/z/9zt2iK
- sle -> ule: https://alive2.llvm.org/ce/z/SPoPNF
- sgt -> ugt: https://alive2.llvm.org/ce/z/IGNxAk
- sge -> uge: https://alive2.llvm.org/ce/z/joqTvR
2) If known `Y >= 0`:
- `(X & PosY) s> X --> X s< 0`
- https://alive2.llvm.org/ce/z/7e-5BQ
- `(X & PosY) s<= X --> X s>= 0`
- https://alive2.llvm.org/ce/z/jvT4Gb
3) If known `X < 0`:
- `(NegX & Y) s> NegX --> Y s>= 0`
- https://alive2.llvm.org/ce/z/ApkaEh
- `(NegX & Y) s<= NegX --> Y s< 0`
- https://alive2.llvm.org/ce/z/oRnfHp
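As a quick illustration of rule 1 (a hand-written IR sketch, not one of the patch's test cases; the function name is made up): forcing the sign bit of `%y` makes it known negative, so `%x & %y` has the same sign bit as `%x`, and a signed compare of the two values is equivalent to the unsigned one, which existing folds may then simplify further (see the updated icmp-of-and-x.ll tests below).

    define i1 @slt_known_neg_y(i8 %x, i8 %yy) {
      %y = or i8 %yy, -128        ; sign bit forced, so %y is known negative
      %and = and i8 %y, %x        ; %and keeps the sign bit of %x
      %r = icmp slt i8 %and, %x   ; rule 1: slt can be rewritten as ult
      ret i1 %r
    }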
---
.../InstCombine/InstCombineCompares.cpp | 29 ++++++++
...t-low-bit-mask-and-icmp-sge-to-icmp-sle.ll | 3 +-
...t-low-bit-mask-and-icmp-sgt-to-icmp-sgt.ll | 3 +-
...t-low-bit-mask-and-icmp-sle-to-icmp-sle.ll | 3 +-
...t-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll | 3 +-
.../InstCombine/icmp-and-lowbit-mask.ll | 72 +++++++++----------
.../Transforms/InstCombine/icmp-of-and-x.ll | 38 ++++------
7 files changed, 80 insertions(+), 71 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 89193f8ff94b6..6f2ff3010cd75 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4745,6 +4745,35 @@ static Instruction *foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q,
Constant::getNullValue(Op1->getType()));
}
+ if (!ICmpInst::isSigned(Pred))
+ return nullptr;
+
+ KnownBits KnownY = IC.computeKnownBits(A, /*Depth=*/0, &I);
+ // (X & NegY) spred X --> (X & NegY) upred X
+ if (KnownY.isNegative())
+ return new ICmpInst(ICmpInst::getUnsignedPredicate(Pred), Op0, Op1);
+
+ if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGT)
+ return nullptr;
+
+ if (KnownY.isNonNegative())
+ // (X & PosY) s<= X --> X s>= 0
+ // (X & PosY) s> X --> X s< 0
+ return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
+ Constant::getNullValue(Op1->getType()));
+
+ if (isKnownNegative(Op1, IC.getSimplifyQuery().getWithInstruction(&I))) {
+ // (NegX & Y) s> NegX --> Y s>= 0
+ if (Pred == ICmpInst::ICMP_SGT)
+ return new ICmpInst(ICmpInst::ICMP_SGE, A,
+ Constant::getNullValue(A->getType()));
+
+ // (NegX & Y) s<= NegX --> Y s< 0
+ if (Pred == ICmpInst::ICMP_SLE)
+ return new ICmpInst(ICmpInst::ICMP_SLT, A,
+ Constant::getNullValue(A->getType()));
+ }
+
return nullptr;
}
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll
index ae503bfb1cfe2..e103fe9440986 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sge-to-icmp-sle.ll
@@ -98,8 +98,7 @@ declare i8 @gen8()
define i1 @c0() {
; CHECK-LABEL: @c0(
; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[X]], 3
-; CHECK-NEXT: [[RET:%.*]] = icmp sge i8 [[X]], [[TMP0]]
+; CHECK-NEXT: [[RET:%.*]] = icmp sgt i8 [[X]], -1
; CHECK-NEXT: ret i1 [[RET]]
;
%x = call i8 @gen8()
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sgt-to-icmp-sgt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sgt-to-icmp-sgt.ll
index d1dd411ee86b3..bbd733e86a32d 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sgt-to-icmp-sgt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sgt-to-icmp-sgt.ll
@@ -125,8 +125,7 @@ define i1 @oneuse0() {
define i1 @c0(i8 %x) {
; CHECK-LABEL: @c0(
-; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[X:%.*]], 3
-; CHECK-NEXT: [[RET:%.*]] = icmp sgt i8 [[TMP0]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = and i8 %x, 3
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sle-to-icmp-sle.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sle-to-icmp-sle.ll
index 4bed21a525f05..b167c8ad25aa9 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sle-to-icmp-sle.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-sle-to-icmp-sle.ll
@@ -113,8 +113,7 @@ define i1 @oneuse0() {
define i1 @c0(i8 %x) {
; CHECK-LABEL: @c0(
-; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[X:%.*]], 3
-; CHECK-NEXT: [[RET:%.*]] = icmp sle i8 [[TMP0]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp sgt i8 [[X:%.*]], -1
; CHECK-NEXT: ret i1 [[RET]]
;
%tmp0 = and i8 %x, 3
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll
index 8415204a4915a..8281502447732 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-slt-to-icmp-sgt.ll
@@ -108,8 +108,7 @@ declare i8 @gen8()
define i1 @c0() {
; CHECK-LABEL: @c0(
; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[X]], 3
-; CHECK-NEXT: [[RET:%.*]] = icmp slt i8 [[X]], [[TMP0]]
+; CHECK-NEXT: [[RET:%.*]] = icmp slt i8 [[X]], 0
; CHECK-NEXT: ret i1 [[RET]]
;
%x = call i8 @gen8()
diff --git a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
index 8bb7fd0e522cb..0aace5f52c96c 100644
--- a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
@@ -7,8 +7,8 @@ define i1 @src_is_mask_zext(i16 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_zext(
; CHECK-NEXT: [[M_IN:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = zext i8 [[M_IN]] to i16
-; CHECK-NEXT: [[X:%.*]] = xor i16 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ule i16 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ule i16 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i16 %x_in, 123
@@ -83,8 +83,8 @@ define i1 @src_is_mask_and(i8 %x_in, i8 %y, i8 %z) {
; CHECK-NEXT: [[MY:%.*]] = lshr i8 7, [[Y:%.*]]
; CHECK-NEXT: [[MZ:%.*]] = lshr i8 -1, [[Z:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = and i8 [[MY]], [[MZ]]
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -121,8 +121,8 @@ define i1 @src_is_mask_or(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_or(
; CHECK-NEXT: [[MY:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = and i8 [[MY]], 7
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -138,8 +138,8 @@ define i1 @src_is_mask_xor(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_xor(
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
; CHECK-NEXT: [[MASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -173,8 +173,8 @@ define i1 @src_is_mask_select(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -249,8 +249,8 @@ define i1 @src_is_mask_lshr(i8 %x_in, i8 %y, i8 %z, i1 %cond) {
; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
; CHECK-NEXT: [[SMASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
; CHECK-NEXT: [[MASK:%.*]] = lshr i8 [[SMASK]], [[Z:%.*]]
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -269,8 +269,8 @@ define i1 @src_is_mask_ashr(i8 %x_in, i8 %y, i8 %z, i1 %cond) {
; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
; CHECK-NEXT: [[SMASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
; CHECK-NEXT: [[MASK:%.*]] = ashr i8 [[SMASK]], [[Z:%.*]]
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -287,8 +287,8 @@ define i1 @src_is_mask_p2_m1(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_p2_m1(
; CHECK-NEXT: [[P2ORZ:%.*]] = shl i8 2, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = add i8 [[P2ORZ]], -1
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -304,8 +304,8 @@ define i1 @src_is_mask_umax(i8 %x_in, i8 %y) {
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.umax.i8(i8 [[YMASK]], i8 3)
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -324,8 +324,8 @@ define i1 @src_is_mask_umin(i8 %x_in, i8 %y, i8 %z) {
; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
; CHECK-NEXT: [[ZMASK:%.*]] = lshr i8 15, [[Z:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.umin.i8(i8 [[YMASK]], i8 [[ZMASK]])
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -364,8 +364,8 @@ define i1 @src_is_mask_smax(i8 %x_in, i8 %y) {
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.smax.i8(i8 [[YMASK]], i8 -1)
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -383,8 +383,8 @@ define i1 @src_is_mask_smin(i8 %x_in, i8 %y) {
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.smin.i8(i8 [[YMASK]], i8 0)
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -401,8 +401,8 @@ define i1 @src_is_mask_bitreverse_not_mask(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_bitreverse_not_mask(
; CHECK-NEXT: [[NMASK:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[NMASK]])
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[MASK]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -455,9 +455,9 @@ define i1 @src_is_notmask_shl(i8 %x_in, i8 %y, i1 %cond) {
define i1 @src_is_notmask_x_xor_neg_x(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_is_notmask_x_xor_neg_x(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[NEG_Y:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[NOTMASK0:%.*]] = xor i8 [[NEG_Y]], [[Y]]
-; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[NOTMASK0]], i8 7
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[TMP2]], i8 7
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[TMP3]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -473,9 +473,9 @@ define i1 @src_is_notmask_x_xor_neg_x(i8 %x_in, i8 %y, i1 %cond) {
define i1 @src_is_notmask_x_xor_neg_x_inv(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_is_notmask_x_xor_neg_x_inv(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[NEG_Y:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[NOTMASK0:%.*]] = xor i8 [[NEG_Y]], [[Y]]
-; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[NOTMASK0]], i8 7
+; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[TMP2]], i8 7
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[TMP3]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -625,9 +625,7 @@ define i1 @src_is_notmask_xor_fail(i8 %x_in, i8 %y) {
define i1 @src_is_mask_const_slt(i8 %x_in) {
; CHECK-LABEL: @src_is_mask_const_slt(
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], 7
-; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[X]], [[AND]]
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[X_IN:%.*]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
@@ -650,9 +648,7 @@ define i1 @src_is_mask_const_sgt(i8 %x_in) {
define i1 @src_is_mask_const_sle(i8 %x_in) {
; CHECK-LABEL: @src_is_mask_const_sle(
-; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], 31
-; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[AND]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[X_IN:%.*]], -1
; CHECK-NEXT: ret i1 [[R]]
;
%x = xor i8 %x_in, 123
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
index 0f26be12c39cc..75badabda01ae 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
@@ -58,7 +58,7 @@ define i1 @icmp_sge_x_negy(i8 %x, i8 %y) {
; CHECK-NEXT: [[CY:%.*]] = icmp slt i8 [[Y:%.*]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CY]])
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[Z:%.*]] = icmp sge i8 [[AND]], [[X]]
+; CHECK-NEXT: [[Z:%.*]] = icmp eq i8 [[AND]], [[X]]
; CHECK-NEXT: ret i1 [[Z]]
;
%cy = icmp slt i8 %y, 0
@@ -74,7 +74,7 @@ define i1 @icmp_slt_x_negy(i8 %x, i8 %y) {
; CHECK-NEXT: br i1 [[CY]], label [[NEGY:%.*]], label [[POSY:%.*]]
; CHECK: negy:
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[Z:%.*]] = icmp slt i8 [[AND]], [[X]]
+; CHECK-NEXT: [[Z:%.*]] = icmp ne i8 [[AND]], [[X]]
; CHECK-NEXT: ret i1 [[Z]]
; CHECK: posy:
; CHECK-NEXT: [[R:%.*]] = call i1 @barrier()
@@ -116,10 +116,7 @@ posy:
define i1 @icmp_sle_x_negy(i8 %x, i8 %yy) {
; CHECK-LABEL: @icmp_sle_x_negy(
-; CHECK-NEXT: [[Y:%.*]] = or i8 [[YY:%.*]], -128
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y]], [[X:%.*]]
-; CHECK-NEXT: [[Z:%.*]] = icmp sle i8 [[AND]], [[X]]
-; CHECK-NEXT: ret i1 [[Z]]
+; CHECK-NEXT: ret i1 true
;
%y = or i8 %yy, 128
%and = and i8 %y, %x
@@ -129,10 +126,7 @@ define i1 @icmp_sle_x_negy(i8 %x, i8 %yy) {
define <2 x i1> @icmp_sgt_x_negy(<2 x i8> %x, <2 x i8> %yy) {
; CHECK-LABEL: @icmp_sgt_x_negy(
-; CHECK-NEXT: [[Y:%.*]] = or <2 x i8> [[YY:%.*]], <i8 -128, i8 -128>
-; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[Y]], [[X:%.*]]
-; CHECK-NEXT: [[Z:%.*]] = icmp sgt <2 x i8> [[AND]], [[X]]
-; CHECK-NEXT: ret <2 x i1> [[Z]]
+; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
%y = or <2 x i8> %yy, <i8 128, i8 128>
%and = and <2 x i8> %y, %x
@@ -155,9 +149,7 @@ define <2 x i1> @icmp_sgt_x_negy_fail_partial(<2 x i8> %x, <2 x i8> %yy) {
define <2 x i1> @icmp_sle_x_posy(<2 x i8> %x, <2 x i8> %yy) {
; CHECK-LABEL: @icmp_sle_x_posy(
-; CHECK-NEXT: [[Y:%.*]] = and <2 x i8> [[YY:%.*]], <i8 127, i8 127>
-; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[Y]], [[X:%.*]]
-; CHECK-NEXT: [[Z:%.*]] = icmp sle <2 x i8> [[AND]], [[X]]
+; CHECK-NEXT: [[Z:%.*]] = icmp sgt <2 x i8> [[X:%.*]], <i8 -1, i8 -1>
; CHECK-NEXT: ret <2 x i1> [[Z]]
;
%y = and <2 x i8> %yy, <i8 127, i8 127>
@@ -183,8 +175,7 @@ define i1 @icmp_sgt_x_posy(i8 %x, i8 %y) {
; CHECK-LABEL: @icmp_sgt_x_posy(
; CHECK-NEXT: [[CY:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[CY]])
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[Z:%.*]] = icmp sgt i8 [[AND]], [[X]]
+; CHECK-NEXT: [[Z:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT: ret i1 [[Z]]
;
%cy = icmp sge i8 %y, 0
@@ -196,9 +187,7 @@ define i1 @icmp_sgt_x_posy(i8 %x, i8 %y) {
define <2 x i1> @icmp_sgt_negx_y(<2 x i8> %xx, <2 x i8> %y) {
; CHECK-LABEL: @icmp_sgt_negx_y(
-; CHECK-NEXT: [[X:%.*]] = or <2 x i8> [[XX:%.*]], <i8 -128, i8 -128>
-; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[Z:%.*]] = icmp sgt <2 x i8> [[AND]], [[X]]
+; CHECK-NEXT: [[Z:%.*]] = icmp sgt <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
; CHECK-NEXT: ret <2 x i1> [[Z]]
;
%x = or <2 x i8> %xx, <i8 128, i8 128>
@@ -211,8 +200,7 @@ define i1 @icmp_sle_negx_y(i8 %x, i8 %y) {
; CHECK-LABEL: @icmp_sle_negx_y(
; CHECK-NEXT: [[CX:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CX]])
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[Z:%.*]] = icmp sle i8 [[AND]], [[X]]
+; CHECK-NEXT: [[Z:%.*]] = icmp slt i8 [[Y:%.*]], 0
; CHECK-NEXT: ret i1 [[Z]]
;
%cx = icmp slt i8 %x, 0
@@ -239,9 +227,9 @@ define i1 @icmp_sle_negx_y_fail_maybe_zero(i8 %x, i8 %y) {
define i1 @icmp_eq_x_invertable_y_todo(i8 %x, i1 %y) {
; CHECK-LABEL: @icmp_eq_x_invertable_y_todo(
-; CHECK-NEXT: [[YY:%.*]] = select i1 [[Y:%.*]], i8 -8, i8 -25
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[Y:%.*]], i8 -8, i8 -25
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%yy = select i1 %y, i8 7, i8 24
@@ -252,8 +240,8 @@ define i1 @icmp_eq_x_invertable_y_todo(i8 %x, i1 %y) {
define i1 @icmp_eq_x_invertable_y(i8 %x, i8 %y) {
; CHECK-LABEL: @icmp_eq_x_invertable_y(
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[YY:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%yy = xor i8 %y, -1