[llvm] [InstCombine] Fold ZExt(i1) Pred lshr(A, BW - 1) => i1 Pred A s< 0 (PR #68244)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 13 00:47:09 PDT 2023
https://github.com/XChy updated https://github.com/llvm/llvm-project/pull/68244
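In short, the fold rewrites an unsigned or equality comparison between a sign-bit extraction (lshr of A by BitWidth - 1) and a zero-extended i1 into an i1 comparison against A s< 0. As a rough sketch of the transform, grounded in the @slt_zero_ult_i1 test updated below (value names follow the FileCheck captures and may differ in the actual output), the input

  %conv = zext i1 %b to i32
  %cmp1 = lshr i32 %a, 31
  %cmp2 = icmp ult i32 %conv, %cmp1

becomes, after follow-on InstCombine folds:

  %1 = icmp slt i32 %a, 0
  %2 = xor i1 %b, true
  %cmp2 = and i1 %1, %2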
From 43430972c8900183a65eae84769f92d356994d6f Mon Sep 17 00:00:00 2001
From: XChy <xxs_chy at outlook.com>
Date: Wed, 4 Oct 2023 14:03:06 +0800
Subject: [PATCH 1/2] [InstCombine] Tests for the fold A s< 0 Pred i1
---
llvm/test/Transforms/InstCombine/icmp-shr.ll | 52 ++++++++
.../InstCombine/icmp-xor-signbit.ll | 115 ++++++++++++++++++
2 files changed, 167 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/icmp-shr.ll b/llvm/test/Transforms/InstCombine/icmp-shr.ll
index f4dfa2edfa17710..be042845e1b7f32 100644
--- a/llvm/test/Transforms/InstCombine/icmp-shr.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-shr.ll
@@ -1589,3 +1589,55 @@ define i1 @exactly_one_set_signbit_wrong_shamt_signed(i8 %x, i8 %y) {
%r = icmp eq i8 %xsign, %yposz
ret i1 %r
}
+
+define i1 @slt_zero_ult_i1(i32 %a, i1 %b) {
+; CHECK-LABEL: @slt_zero_ult_i1(
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A:%.*]], 31
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %conv = zext i1 %b to i32
+ %cmp1 = lshr i32 %a, 31
+ %cmp2 = icmp ult i32 %conv, %cmp1
+ ret i1 %cmp2
+}
+
+define i1 @slt_zero_ult_i1_fail1(i32 %a, i1 %b) {
+; CHECK-LABEL: @slt_zero_ult_i1_fail1(
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A:%.*]], 30
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %conv = zext i1 %b to i32
+ %cmp1 = lshr i32 %a, 30
+ %cmp2 = icmp ult i32 %conv, %cmp1
+ ret i1 %cmp2
+}
+
+define i1 @slt_zero_ult_i1_fail2(i32 %a, i1 %b) {
+; CHECK-LABEL: @slt_zero_ult_i1_fail2(
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = ashr i32 [[A:%.*]], 31
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %conv = zext i1 %b to i32
+ %cmp1 = ashr i32 %a, 31
+ %cmp2 = icmp ult i32 %conv, %cmp1
+ ret i1 %cmp2
+}
+
+define i1 @slt_zero_slt_i1_fail(i32 %a, i1 %b) {
+; CHECK-LABEL: @slt_zero_slt_i1_fail(
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A:%.*]], 31
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %conv = zext i1 %b to i32
+ %cmp1 = lshr i32 %a, 31
+ %cmp2 = icmp slt i32 %conv, %cmp1
+ ret i1 %cmp2
+}
diff --git a/llvm/test/Transforms/InstCombine/icmp-xor-signbit.ll b/llvm/test/Transforms/InstCombine/icmp-xor-signbit.ll
index 29a18ebbdd94e16..22696f3eef0edda 100644
--- a/llvm/test/Transforms/InstCombine/icmp-xor-signbit.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-xor-signbit.ll
@@ -217,3 +217,118 @@ define <2 x i1> @negative_simplify_splat(<4 x i8> %x) {
ret <2 x i1> %c
}
+define i1 @slt_zero_eq_i1(i32 %a, i1 %b) {
+; CHECK-LABEL: @slt_zero_eq_i1(
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A:%.*]], 31
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %conv = zext i1 %b to i32
+ %cmp1 = lshr i32 %a, 31
+ %cmp2 = icmp eq i32 %conv, %cmp1
+ ret i1 %cmp2
+}
+
+define i1 @slt_zero_eq_i1_fail(i32 %a, i1 %b) {
+; CHECK-LABEL: @slt_zero_eq_i1_fail(
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = ashr i32 [[A:%.*]], 31
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %conv = zext i1 %b to i32
+ %cmp1 = ashr i32 %a, 31
+ %cmp2 = icmp eq i32 %conv, %cmp1
+ ret i1 %cmp2
+}
+
+define i1 @slt_zero_eq_ne_0(i32 %a) {
+; CHECK-LABEL: @slt_zero_eq_ne_0(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A]], 31
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %cmp = icmp ne i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ %cmp1 = lshr i32 %a, 31
+ %cmp2 = icmp eq i32 %conv, %cmp1
+ ret i1 %cmp2
+}
+
+define i1 @slt_zero_ne_ne_0(i32 %a) {
+; CHECK-LABEL: @slt_zero_ne_ne_0(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A]], 31
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %cmp = icmp ne i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ %cmp1 = lshr i32 %a, 31
+ %cmp2 = icmp ne i32 %conv, %cmp1
+ ret i1 %cmp2
+}
+
+define <4 x i1> @slt_zero_eq_ne_0_vec(<4 x i32> %a) {
+; CHECK-LABEL: @slt_zero_eq_ne_0_vec(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <4 x i32> [[A:%.*]], zeroinitializer
+; CHECK-NEXT: [[CONV:%.*]] = zext <4 x i1> [[CMP]] to <4 x i32>
+; CHECK-NEXT: [[CMP1:%.*]] = lshr <4 x i32> [[A]], <i32 31, i32 31, i32 31, i32 31>
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq <4 x i32> [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret <4 x i1> [[CMP2]]
+;
+ %cmp = icmp ne <4 x i32> %a, zeroinitializer
+ %conv = zext <4 x i1> %cmp to <4 x i32>
+ %cmp1 = lshr <4 x i32> %a, <i32 31, i32 31, i32 31, i32 31>
+ %cmp2 = icmp eq <4 x i32> %conv, %cmp1
+ ret <4 x i1> %cmp2
+}
+
+define i1 @slt_zero_ne_ne_b(i32 %a, i32 %b) {
+; CHECK-LABEL: @slt_zero_ne_ne_b(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A]], 31
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %cmp = icmp ne i32 %a, %b
+ %conv = zext i1 %cmp to i32
+ %cmp1 = lshr i32 %a, 31
+ %cmp2 = icmp ne i32 %conv, %cmp1
+ ret i1 %cmp2
+}
+
+define i1 @slt_zero_eq_ne_0_fail1(i32 %a) {
+; CHECK-LABEL: @slt_zero_eq_ne_0_fail1(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = ashr i32 [[A]], 31
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %cmp = icmp ne i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ %cmp1 = ashr i32 %a, 31
+ %cmp2 = icmp eq i32 %conv, %cmp1
+ ret i1 %cmp2
+}
+
+define i1 @slt_zero_eq_ne_0_fail2(i32 %a) {
+; CHECK-LABEL: @slt_zero_eq_ne_0_fail2(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
+; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A]], 30
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+;
+ %cmp = icmp ne i32 %a, 0
+ %conv = zext i1 %cmp to i32
+ %cmp1 = lshr i32 %a, 30
+ %cmp2 = icmp eq i32 %conv, %cmp1
+ ret i1 %cmp2
+}
From 24950fd2f4138caed78fd5fb28decd00474cb6cb Mon Sep 17 00:00:00 2001
From: XChy <xxs_chy at outlook.com>
Date: Tue, 3 Oct 2023 23:27:02 +0800
Subject: [PATCH 2/2] [InstCombine] Fold A u>> BW-1 Pred ZExt(i1) into A s< 0 Pred i1
---
.../InstCombine/InstCombineCompares.cpp | 17 +++++
llvm/test/Transforms/InstCombine/icmp-shr.ll | 62 +++++++++----------
.../InstCombine/icmp-xor-signbit.ll | 35 ++++-------
3 files changed, 59 insertions(+), 55 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 95b506f0e35faff..4b9b9e0ff3b628a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -7186,6 +7186,23 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
return R;
}
+
+ // Signbit test folds
+ // Fold (X u>> BitWidth - 1 Pred Zext(i1)) --> X s< 0 Pred i1
+ Value *X, *Y;
+ if ((I.isUnsigned() || I.isEquality()) &&
+ (Op0->hasOneUse() || Op1->hasOneUse()) &&
+ match(Op1, m_ZExt(m_Value(Y))) &&
+ (match(Op0, m_LShr(m_Value(X),
+ m_SpecificIntAllowUndef(
+ Op0->getType()->getScalarSizeInBits() - 1))) &&
+ Y->getType()->getScalarSizeInBits() == 1)) {
+
+ Value *SLTZero =
+ Builder.CreateICmpSLT(X, Constant::getNullValue(X->getType()));
+ Value *Cmp = Builder.CreateICmp(Pred, SLTZero, Y, I.getName());
+ return replaceInstUsesWith(I, Cmp);
+ }
}
if (Instruction *Res = foldICmpEquality(I))
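The predicate guard (I.isUnsigned() || I.isEquality()) keeps the fold sound: both original operands are known to be 0 or 1, so unsigned and equality comparisons translate directly to i1, whereas a signed predicate would reinterpret the i1 value 1 as -1. For the equality case, the @slt_zero_eq_i1 test updated below goes from

  %conv = zext i1 %b to i32
  %cmp1 = lshr i32 %a, 31
  %cmp2 = icmp eq i32 %conv, %cmp1

to (after follow-on folds rewrite "eq (A s< 0), %b" as an xor with the inverted sign test):

  %1 = icmp sgt i32 %a, -1
  %cmp21 = xor i1 %1, %b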
diff --git a/llvm/test/Transforms/InstCombine/icmp-shr.ll b/llvm/test/Transforms/InstCombine/icmp-shr.ll
index be042845e1b7f32..27eecb0f949d919 100644
--- a/llvm/test/Transforms/InstCombine/icmp-shr.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-shr.ll
@@ -1302,9 +1302,9 @@ define i1 @lshr_neg_sgt_zero(i8 %x) {
define i1 @exactly_one_set_signbit(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit(
-; CHECK-NEXT: [[XOR_SIGNBITS:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[XOR_SIGNBITS]], 0
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i8 [[TMP1]], 0
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%xsign = lshr i8 %x, 7
%ypos = icmp sgt i8 %y, -1
@@ -1317,9 +1317,9 @@ define i1 @exactly_one_set_signbit_use1(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit_use1(
; CHECK-NEXT: [[XSIGN:%.*]] = lshr i8 [[X:%.*]], 7
; CHECK-NEXT: call void @use(i8 [[XSIGN]])
-; CHECK-NEXT: [[XOR_SIGNBITS:%.*]] = xor i8 [[X]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[XOR_SIGNBITS]], 0
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i8 [[TMP1]], 0
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%xsign = lshr i8 %x, 7
call void @use(i8 %xsign)
@@ -1331,9 +1331,9 @@ define i1 @exactly_one_set_signbit_use1(i8 %x, i8 %y) {
define <2 x i1> @same_signbit(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @same_signbit(
-; CHECK-NEXT: [[XOR_SIGNBITS:%.*]] = xor <2 x i8> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i8> [[XOR_SIGNBITS]], <i8 -1, i8 -1>
-; CHECK-NEXT: ret <2 x i1> [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[R1:%.*]] = icmp sgt <2 x i8> [[TMP1]], <i8 -1, i8 -1>
+; CHECK-NEXT: ret <2 x i1> [[R1]]
;
%xsign = lshr <2 x i8> %x, <i8 7, i8 7>
%ypos = icmp sgt <2 x i8> %y, <i8 -1, i8 -1>
@@ -1347,9 +1347,9 @@ define i1 @same_signbit_use2(i8 %x, i8 %y) {
; CHECK-NEXT: [[YPOS:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT: [[YPOSZ:%.*]] = zext i1 [[YPOS]] to i8
; CHECK-NEXT: call void @use(i8 [[YPOSZ]])
-; CHECK-NEXT: [[XOR_SIGNBITS:%.*]] = xor i8 [[X:%.*]], [[Y]]
-; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[XOR_SIGNBITS]], -1
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y]]
+; CHECK-NEXT: [[R1:%.*]] = icmp sgt i8 [[TMP1]], -1
+; CHECK-NEXT: ret i1 [[R1]]
;
%xsign = lshr i8 %x, 7
%ypos = icmp sgt i8 %y, -1
@@ -1382,9 +1382,10 @@ define i1 @same_signbit_use3(i8 %x, i8 %y) {
define <2 x i1> @same_signbit_poison_elts(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @same_signbit_poison_elts(
-; CHECK-NEXT: [[XOR_SIGNBITS:%.*]] = xor <2 x i8> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i8> [[XOR_SIGNBITS]], <i8 -1, i8 -1>
-; CHECK-NEXT: ret <2 x i1> [[R]]
+; CHECK-NEXT: [[YPOS:%.*]] = icmp sgt <2 x i8> [[Y:%.*]], <i8 -1, i8 poison>
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
+; CHECK-NEXT: [[R1:%.*]] = xor <2 x i1> [[TMP1]], [[YPOS]]
+; CHECK-NEXT: ret <2 x i1> [[R1]]
;
%xsign = lshr <2 x i8> %x, <i8 7, i8 poison>
%ypos = icmp sgt <2 x i8> %y, <i8 -1, i8 poison>
@@ -1397,11 +1398,10 @@ define <2 x i1> @same_signbit_poison_elts(<2 x i8> %x, <2 x i8> %y) {
define i1 @same_signbit_wrong_type(i8 %x, i32 %y) {
; CHECK-LABEL: @same_signbit_wrong_type(
-; CHECK-NEXT: [[XSIGN:%.*]] = lshr i8 [[X:%.*]], 7
; CHECK-NEXT: [[YPOS:%.*]] = icmp sgt i32 [[Y:%.*]], -1
-; CHECK-NEXT: [[YPOSZ:%.*]] = zext i1 [[YPOS]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[XSIGN]], [[YPOSZ]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i8 [[X:%.*]], 0
+; CHECK-NEXT: [[R1:%.*]] = xor i1 [[TMP1]], [[YPOS]]
+; CHECK-NEXT: ret i1 [[R1]]
;
%xsign = lshr i8 %x, 7
%ypos = icmp sgt i32 %y, -1
@@ -1450,11 +1450,9 @@ define i1 @exactly_one_set_signbit_wrong_shr(i8 %x, i8 %y) {
define i1 @exactly_one_set_signbit_wrong_pred(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit_wrong_pred(
-; CHECK-NEXT: [[XSIGN:%.*]] = lshr i8 [[X:%.*]], 7
-; CHECK-NEXT: [[YPOS:%.*]] = icmp sgt i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YPOSZ:%.*]] = zext i1 [[YPOS]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[XSIGN]], [[YPOSZ]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[R1:%.*]] = icmp slt i8 [[TMP1]], 0
+; CHECK-NEXT: ret i1 [[R1]]
;
%xsign = lshr i8 %x, 7
%ypos = icmp sgt i8 %y, -1
@@ -1592,10 +1590,10 @@ define i1 @exactly_one_set_signbit_wrong_shamt_signed(i8 %x, i8 %y) {
define i1 @slt_zero_ult_i1(i32 %a, i1 %b) {
; CHECK-LABEL: @slt_zero_ult_i1(
-; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A:%.*]], 31
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[CMP1]], [[CONV]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[A:%.*]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[B:%.*]], true
+; CHECK-NEXT: [[CMP21:%.*]] = and i1 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret i1 [[CMP21]]
;
%conv = zext i1 %b to i32
%cmp1 = lshr i32 %a, 31
@@ -1631,10 +1629,10 @@ define i1 @slt_zero_ult_i1_fail2(i32 %a, i1 %b) {
define i1 @slt_zero_slt_i1_fail(i32 %a, i1 %b) {
; CHECK-LABEL: @slt_zero_slt_i1_fail(
-; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A:%.*]], 31
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[CMP1]], [[CONV]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[A:%.*]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[B:%.*]], true
+; CHECK-NEXT: [[CMP21:%.*]] = and i1 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret i1 [[CMP21]]
;
%conv = zext i1 %b to i32
%cmp1 = lshr i32 %a, 31
diff --git a/llvm/test/Transforms/InstCombine/icmp-xor-signbit.ll b/llvm/test/Transforms/InstCombine/icmp-xor-signbit.ll
index 22696f3eef0edda..d08dca225328fef 100644
--- a/llvm/test/Transforms/InstCombine/icmp-xor-signbit.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-xor-signbit.ll
@@ -219,10 +219,9 @@ define <2 x i1> @negative_simplify_splat(<4 x i8> %x) {
define i1 @slt_zero_eq_i1(i32 %a, i1 %b) {
; CHECK-LABEL: @slt_zero_eq_i1(
-; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A:%.*]], 31
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[A:%.*]], -1
+; CHECK-NEXT: [[CMP21:%.*]] = xor i1 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: ret i1 [[CMP21]]
;
%conv = zext i1 %b to i32
%cmp1 = lshr i32 %a, 31
@@ -245,11 +244,8 @@ define i1 @slt_zero_eq_i1_fail(i32 %a, i1 %b) {
define i1 @slt_zero_eq_ne_0(i32 %a) {
; CHECK-LABEL: @slt_zero_eq_ne_0(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
-; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A]], 31
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[A:%.*]], 1
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%cmp = icmp ne i32 %a, 0
%conv = zext i1 %cmp to i32
@@ -260,11 +256,8 @@ define i1 @slt_zero_eq_ne_0(i32 %a) {
define i1 @slt_zero_ne_ne_0(i32 %a) {
; CHECK-LABEL: @slt_zero_ne_ne_0(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
-; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A]], 31
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[CMP1]], [[CONV]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp sgt i32 [[A:%.*]], 0
+; CHECK-NEXT: ret i1 [[CMP21]]
;
%cmp = icmp ne i32 %a, 0
%conv = zext i1 %cmp to i32
@@ -275,11 +268,8 @@ define i1 @slt_zero_ne_ne_0(i32 %a) {
define <4 x i1> @slt_zero_eq_ne_0_vec(<4 x i32> %a) {
; CHECK-LABEL: @slt_zero_eq_ne_0_vec(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne <4 x i32> [[A:%.*]], zeroinitializer
-; CHECK-NEXT: [[CONV:%.*]] = zext <4 x i1> [[CMP]] to <4 x i32>
-; CHECK-NEXT: [[CMP1:%.*]] = lshr <4 x i32> [[A]], <i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq <4 x i32> [[CMP1]], [[CONV]]
-; CHECK-NEXT: ret <4 x i1> [[CMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <4 x i32> [[A:%.*]], <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: ret <4 x i1> [[TMP1]]
;
%cmp = icmp ne <4 x i32> %a, zeroinitializer
%conv = zext <4 x i1> %cmp to <4 x i32>
@@ -291,10 +281,9 @@ define <4 x i1> @slt_zero_eq_ne_0_vec(<4 x i32> %a) {
define i1 @slt_zero_ne_ne_b(i32 %a, i32 %b) {
; CHECK-LABEL: @slt_zero_ne_ne_b(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = lshr i32 [[A]], 31
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[CMP1]], [[CONV]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[A]], 0
+; CHECK-NEXT: [[CMP21:%.*]] = xor i1 [[TMP1]], [[CMP]]
+; CHECK-NEXT: ret i1 [[CMP21]]
;
%cmp = icmp ne i32 %a, %b
%conv = zext i1 %cmp to i32