[llvm] b2f6cf1 - [InstCombine] Fold lshr/ashr(or(neg(x), x), bw-1) --> zext/sext(icmp_ne(x,0)) (PR50816)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 13 06:48:07 PDT 2021


Author: Simon Pilgrim
Date: 2021-07-13T14:44:54+01:00
New Revision: b2f6cf14798ac738bc2c9b35bd83171e0771b7a3

URL: https://github.com/llvm/llvm-project/commit/b2f6cf14798ac738bc2c9b35bd83171e0771b7a3
DIFF: https://github.com/llvm/llvm-project/commit/b2f6cf14798ac738bc2c9b35bd83171e0771b7a3.diff

LOG: [InstCombine] Fold lshr/ashr(or(neg(x),x),bw-1) --> zext/sext(icmp_ne(x,0)) (PR50816)

Handle the missing fold reported in PR50816, which is a variant of the existing ashr(sub_nsw(X,Y),bw-1) --> sext(icmp_slt(X,Y)) fold.
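
A minimal IR sketch of the new ashr form (illustrative @src/@tgt naming in the
style of the alive2 proofs; the neg_or_ashr_i32 test below covers the same
pattern):

  define i32 @src(i32 %x) {
    %neg = sub i32 0, %x
    %or = or i32 %neg, %x        ; sign bit of %or is set iff %x != 0
    %shr = ashr i32 %or, 31      ; broadcast the sign bit
    ret i32 %shr
  }

  define i32 @tgt(i32 %x) {
    %cmp = icmp ne i32 %x, 0
    %shr = sext i1 %cmp to i32   ; all-ones if %x != 0, zero otherwise
    ret i32 %shr
  }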

We also handle the lshr(or(neg(x),x),bw-1) --> zext(icmp_ne(x,0)) equivalent: https://alive2.llvm.org/ce/z/SnZmSj
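
The lshr variant is the same pattern with a zero-extended result (again an
illustrative sketch; see the neg_or_lshr_i32 test below):

  define i32 @src(i32 %x) {
    %neg = sub i32 0, %x
    %or = or i32 %neg, %x
    %shr = lshr i32 %or, 31      ; extract the sign bit as 0 or 1
    ret i32 %shr
  }

  define i32 @tgt(i32 %x) {
    %cmp = icmp ne i32 %x, 0
    %shr = zext i1 %cmp to i32
    ret i32 %shr
  }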

We still allow multiple uses of the neg(x), as this is likely to let us further simplify other uses of the neg, but not multiple uses of the or(), which would increase the instruction count.
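
For example (a sketch mirroring the neg_extra_use_or_lshr_i32 test below), an
extra use of the neg does not block the fold; the neg simply remains for its
other user, while the or itself must be one-use:

  define i32 @src(i32 %x, i32* %p) {
    %neg = sub i32 0, %x
    %or = or i32 %neg, %x
    %shr = lshr i32 %or, 31
    store i32 %neg, i32* %p      ; extra use of %neg
    ret i32 %shr
  }

  ; folds to:
  define i32 @tgt(i32 %x, i32* %p) {
    %neg = sub i32 0, %x         ; kept for the store
    %cmp = icmp ne i32 %x, 0
    %shr = zext i1 %cmp to i32
    store i32 %neg, i32* %p
    ret i32 %shr
  }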

Differential Revision: https://reviews.llvm.org/D105764

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
    llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
    llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 402e21eb6561e..ca5e473fdecba 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1139,6 +1139,10 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
 
     Value *Y;
     if (ShAmt == BitWidth - 1) {
+      // lshr i32 or(X,-X), 31 --> zext (X != 0)
+      if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
+        return new ZExtInst(Builder.CreateIsNotNull(X), Ty);
+
       // lshr i32 (X -nsw Y), 31 --> zext (X < Y)
       if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
         return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);
@@ -1323,11 +1327,16 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
       return new SExtInst(NewSh, Ty);
     }
 
-    // ashr i32 (X -nsw Y), 31 --> sext (X < Y)
-    Value *Y;
-    if (ShAmt == BitWidth - 1 &&
-        match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
-      return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
+    if (ShAmt == BitWidth - 1) {
+      // ashr i32 or(X,-X), 31 --> sext (X != 0)
+      if (match(Op0, m_OneUse(m_c_Or(m_Neg(m_Value(X)), m_Deferred(X)))))
+        return new SExtInst(Builder.CreateIsNotNull(X), Ty);
+
+      // ashr i32 (X -nsw Y), 31 --> sext (X < Y)
+      Value *Y;
+      if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
+        return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
+    }
 
     // If the shifted-out value is known-zero, then this is an exact shift.
     if (!I.isExact() &&

diff  --git a/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
index 21d7c8a62119b..d0860cbf10c45 100644
--- a/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
+++ b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
@@ -74,9 +74,8 @@ define i64 @sub_ashr_or_i64(i64 %x, i64 %y) {
 
 define i32 @neg_or_ashr_i32(i32 %x) {
 ; CHECK-LABEL: @neg_or_ashr_i32(
-; CHECK-NEXT:    [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[NEG]], [[X]]
-; CHECK-NEXT:    [[SHR:%.*]] = ashr i32 [[OR]], 31
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X:%.*]], 0
+; CHECK-NEXT:    [[SHR:%.*]] = sext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[SHR]]
 ;
   %neg = sub i32 0, %x
@@ -116,9 +115,8 @@ define i32 @sub_ashr_or_i32_commute(i32 %x, i32 %y) {
 define i32 @neg_or_ashr_i32_commute(i32 %x0) {
 ; CHECK-LABEL: @neg_or_ashr_i32_commute(
 ; CHECK-NEXT:    [[X:%.*]] = sdiv i32 42, [[X0:%.*]]
-; CHECK-NEXT:    [[NEG:%.*]] = sub nsw i32 0, [[X]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[X]], [[NEG]]
-; CHECK-NEXT:    [[SHR:%.*]] = ashr i32 [[OR]], 31
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], 0
+; CHECK-NEXT:    [[SHR:%.*]] = sext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[SHR]]
 ;
   %x = sdiv i32 42, %x0 ; thwart complexity-based canonicalization
@@ -156,9 +154,8 @@ define <4 x i32> @sub_ashr_or_i32_vec_nuw_nsw(<4 x i32> %x, <4 x i32> %y) {
 
 define <4 x i32> @neg_or_ashr_i32_vec(<4 x i32> %x) {
 ; CHECK-LABEL: @neg_or_ashr_i32_vec(
-; CHECK-NEXT:    [[NEG:%.*]] = sub <4 x i32> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT:    [[OR:%.*]] = or <4 x i32> [[NEG]], [[X]]
-; CHECK-NEXT:    [[SHR:%.*]] = ashr <4 x i32> [[OR]], <i32 31, i32 31, i32 31, i32 31>
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne <4 x i32> [[X:%.*]], zeroinitializer
+; CHECK-NEXT:    [[SHR:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
 ; CHECK-NEXT:    ret <4 x i32> [[SHR]]
 ;
   %neg = sub <4 x i32> zeroinitializer, %x
@@ -182,9 +179,8 @@ define <4 x i32> @sub_ashr_or_i32_vec_commute(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @neg_or_ashr_i32_vec_commute(<4 x i32> %x0) {
 ; CHECK-LABEL: @neg_or_ashr_i32_vec_commute(
 ; CHECK-NEXT:    [[X:%.*]] = sdiv <4 x i32> <i32 42, i32 42, i32 42, i32 42>, [[X0:%.*]]
-; CHECK-NEXT:    [[NEG:%.*]] = sub nsw <4 x i32> zeroinitializer, [[X]]
-; CHECK-NEXT:    [[OR:%.*]] = or <4 x i32> [[X]], [[NEG]]
-; CHECK-NEXT:    [[SHR:%.*]] = ashr <4 x i32> [[OR]], <i32 31, i32 31, i32 31, i32 31>
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne <4 x i32> [[X]], zeroinitializer
+; CHECK-NEXT:    [[SHR:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
 ; CHECK-NEXT:    ret <4 x i32> [[SHR]]
 ;
   %x = sdiv <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %x0 ; thwart complexity-based canonicalization
@@ -228,8 +224,8 @@ define i32 @sub_ashr_or_i32_extra_use_or(i32 %x, i32 %y, i32* %p) {
 define i32 @neg_extra_use_or_ashr_i32(i32 %x, i32* %p) {
 ; CHECK-LABEL: @neg_extra_use_or_ashr_i32(
 ; CHECK-NEXT:    [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[NEG]], [[X]]
-; CHECK-NEXT:    [[SHR:%.*]] = ashr i32 [[OR]], 31
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], 0
+; CHECK-NEXT:    [[SHR:%.*]] = sext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    store i32 [[NEG]], i32* [[P:%.*]], align 4
 ; CHECK-NEXT:    ret i32 [[SHR]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll b/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll
index 830b9f99b8eb5..e445d97ab3ca4 100644
--- a/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll
+++ b/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll
@@ -5,9 +5,8 @@
 
 define i32 @neg_or_lshr_i32(i32 %x) {
 ; CHECK-LABEL: @neg_or_lshr_i32(
-; CHECK-NEXT:    [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[NEG]], [[X]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[OR]], 31
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X:%.*]], 0
+; CHECK-NEXT:    [[SHR:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[SHR]]
 ;
   %neg = sub i32 0, %x
@@ -21,9 +20,8 @@ define i32 @neg_or_lshr_i32(i32 %x) {
 define i32 @neg_or_lshr_i32_commute(i32 %x0) {
 ; CHECK-LABEL: @neg_or_lshr_i32_commute(
 ; CHECK-NEXT:    [[X:%.*]] = sdiv i32 42, [[X0:%.*]]
-; CHECK-NEXT:    [[NEG:%.*]] = sub nsw i32 0, [[X]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[X]], [[NEG]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[OR]], 31
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], 0
+; CHECK-NEXT:    [[SHR:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[SHR]]
 ;
   %x = sdiv i32 42, %x0 ; thwart complexity-based canonicalization
@@ -37,9 +35,8 @@ define i32 @neg_or_lshr_i32_commute(i32 %x0) {
 
 define <4 x i32> @neg_or_lshr_i32_vec(<4 x i32> %x) {
 ; CHECK-LABEL: @neg_or_lshr_i32_vec(
-; CHECK-NEXT:    [[NEG:%.*]] = sub <4 x i32> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT:    [[OR:%.*]] = or <4 x i32> [[NEG]], [[X]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr <4 x i32> [[OR]], <i32 31, i32 31, i32 31, i32 31>
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne <4 x i32> [[X:%.*]], zeroinitializer
+; CHECK-NEXT:    [[SHR:%.*]] = zext <4 x i1> [[TMP1]] to <4 x i32>
 ; CHECK-NEXT:    ret <4 x i32> [[SHR]]
 ;
   %neg = sub <4 x i32> zeroinitializer, %x
@@ -51,9 +48,8 @@ define <4 x i32> @neg_or_lshr_i32_vec(<4 x i32> %x) {
 define <4 x i32> @neg_or_lshr_i32_vec_commute(<4 x i32> %x0) {
 ; CHECK-LABEL: @neg_or_lshr_i32_vec_commute(
 ; CHECK-NEXT:    [[X:%.*]] = sdiv <4 x i32> <i32 42, i32 42, i32 42, i32 42>, [[X0:%.*]]
-; CHECK-NEXT:    [[NEG:%.*]] = sub nsw <4 x i32> zeroinitializer, [[X]]
-; CHECK-NEXT:    [[OR:%.*]] = or <4 x i32> [[X]], [[NEG]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr <4 x i32> [[OR]], <i32 31, i32 31, i32 31, i32 31>
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne <4 x i32> [[X]], zeroinitializer
+; CHECK-NEXT:    [[SHR:%.*]] = zext <4 x i1> [[TMP1]] to <4 x i32>
 ; CHECK-NEXT:    ret <4 x i32> [[SHR]]
 ;
   %x = sdiv <4 x i32> <i32 42, i32 42, i32 42, i32 42>, %x0 ; thwart complexity-based canonicalization
@@ -68,8 +64,8 @@ define <4 x i32> @neg_or_lshr_i32_vec_commute(<4 x i32> %x0) {
 define i32 @neg_extra_use_or_lshr_i32(i32 %x, i32* %p) {
 ; CHECK-LABEL: @neg_extra_use_or_lshr_i32(
 ; CHECK-NEXT:    [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[NEG]], [[X]]
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[OR]], 31
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i32 [[X]], 0
+; CHECK-NEXT:    [[SHR:%.*]] = zext i1 [[TMP1]] to i32
 ; CHECK-NEXT:    store i32 [[NEG]], i32* [[P:%.*]], align 4
 ; CHECK-NEXT:    ret i32 [[SHR]]
 ;

