[PATCH] D74294: [InstCombine] Relax preconditions for ashr+and+icmp fold (PR44754)

Nikita Popov via Phabricator via llvm-commits <llvm-commits at lists.llvm.org>
Sun Feb 9 01:15:33 PST 2020


nikic created this revision.
nikic added reviewers: spatel, lebedev.ri, craig.topper.
Herald added subscribers: llvm-commits, hiraditya.
Herald added a project: LLVM.

Fix for https://bugs.llvm.org/show_bug.cgi?id=44754. We already have a fold that converts `icmp (and (ashr X, C3), C2), C1` into `icmp (and X, C2'), C1'`, but it imposed overly strict preconditions on the transform.

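As a concrete instance (this is one of the tests updated below), take C3 = 4, C2 = -2 and C1 = 1 on i8. The fold rewrites

  %ashr = ashr i8 %x, 4
  %and = and i8 %ashr, -2
  %cmp = icmp slt i8 %and, 1

into

  %and = and i8 %x, -32
  %cmp = icmp slt i8 %and, 16
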
Relax this by checking that neither C2 nor C1 shifts out bits (in a signed sense) when forming the new constants C2' = C2 << C3 and C1' = C1 << C3. Alive proofs (https://rise4fun.com/Alive/PTz0):

  Name: ashr_legal
  Pre: ((C2 << C3) >> C3) == C2 && ((C1 << C3) >> C3) == C1
  %a = ashr i16 %x, C3
  %b = and i16 %a, C2
  %c = icmp i16 %b, C1
  =>
  %d = and i16 %x, C2 << C3
  %c = icmp i16 %d, C1 << C3
  
  Name: ashr_shiftout_eq
  Pre: ((C2 << C3) >> C3) == C2 && ((C1 << C3) >> C3) != C1
  %a = ashr i16 %x, C3
  %b = and i16 %a, C2
  %c = icmp eq i16 %b, C1
  =>
  %c = false

Note that `>>` corresponds to `ashr` here. The case of an equality comparison has some special handling in this transform, because it folds to a constant true/false result if the precondition on the comparison constant is violated.

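For reference, here is a minimal standalone sketch of the relaxed precondition check, mirroring the APInt logic in the patch (the helper name is made up for illustration):

  #include "llvm/ADT/APInt.h"
  using llvm::APInt;

  // A constant C is safe for the transform iff shifting it left by C3 and
  // arithmetic-shifting back recovers C, i.e. no bits are lost in a signed
  // sense. This is exactly the `((C << C3) >> C3) == C` precondition from
  // the Alive proofs above.
  static bool noSignedShiftOut(const APInt &C, unsigned C3) {
    return C.shl(C3).ashr(C3) == C;
  }

  // For an equality predicate, a comparison constant C1 that fails this
  // check can never equal the masked value, so the icmp folds to a
  // constant (eq -> false, ne -> true) rather than blocking the transform.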

Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D74294

Files:
  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
  llvm/test/Transforms/InstCombine/icmp.ll


Index: llvm/test/Transforms/InstCombine/icmp.ll
===================================================================
--- llvm/test/Transforms/InstCombine/icmp.ll
+++ llvm/test/Transforms/InstCombine/icmp.ll
@@ -1864,9 +1864,8 @@
 
 define i1 @icmp_and_ashr_neg_and_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_and_legal(
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[AND]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], -32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[TMP1]], 16
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
@@ -1891,9 +1890,8 @@
 
 define i1 @icmp_and_ashr_neg_cmp_slt_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_slt_legal(
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[AND]], -4
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], -32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[TMP1]], -64
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
@@ -1918,9 +1916,8 @@
 
 define i1 @icmp_and_ashr_neg_cmp_eq_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_eq_legal(
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[AND]], -4
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], -32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[TMP1]], -64
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
@@ -1931,10 +1928,7 @@
 
 define i1 @icmp_and_ashr_neg_cmp_eq_shiftout(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_eq_shiftout(
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[AND]], -68
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 false
 ;
   %ashr = ashr i8 %x, 4
   %and = and i8 %ashr, -2
Index: llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1666,17 +1666,13 @@
       if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
         return nullptr;
     } else {
-      // For an arithmetic shift right we can do the same, if we ensure
-      // the And doesn't use any bits being shifted in. Normally these would
-      // be turned into lshr by SimplifyDemandedBits, but not if there is an
-      // additional user.
+      // For an arithmetic shift, check that both constants don't use (in a
+      // signed sense) the top bits being shifted out.
       assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
       NewCmpCst = C1.shl(*C3);
       NewAndCst = C2.shl(*C3);
-      AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
-      if (NewAndCst.lshr(*C3) != C2)
-        return nullptr;
-      if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
+      AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
+      if (NewAndCst.ashr(*C3) != C2)
         return nullptr;
     }
 

