[llvm] 0c400e8 - [InstCombine] fold icmp ult of offset value with constant

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 30 16:00:22 PDT 2021


Author: Sanjay Patel
Date: 2021-06-30T19:00:12-04:00
New Revision: 0c400e8953069888315f85d62780839dccbaa33c

URL: https://github.com/llvm/llvm-project/commit/0c400e8953069888315f85d62780839dccbaa33c
DIFF: https://github.com/llvm/llvm-project/commit/0c400e8953069888315f85d62780839dccbaa33c.diff

LOG: [InstCombine] fold icmp ult of offset value with constant

This is one sibling of the fold added with c7b658aeb526.

(X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
I'm still not sure how to describe it best, but we're
translating 2 constants from an unsigned range comparison
to signed because that eliminates the offset (add) op.

This could be extended to handle the more general (non-constant)
pattern too:
https://alive2.llvm.org/ce/z/K-fMBf

  define i1 @src(i8 %a, i8 %c2) {
    %t = add i8 %a, %c2
    %c = add i8 %c2, 128 ; SMIN
    %ov = icmp ult i8 %t, %c
    ret i1 %ov
  }

  define i1 @tgt(i8 %a, i8 %c2) {
    %not_c2 = xor i8 %c2, -1
    %ov = icmp sgt i8 %a, %not_c2
    ret i1 %ov
  }

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
    llvm/test/Transforms/InstCombine/icmp-add.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index f2be0bef999e..6bd479def210 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2640,11 +2640,16 @@ Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
 
   // Fold an unsigned compare with offset to signed compare:
   // (X + C2) >u C --> X <s -C2 (if C == C2 + SMAX)
-  // TODO: Find the ULT and signed predicate siblings.
+  // TODO: Find the signed predicate siblings.
   if (Pred == CmpInst::ICMP_UGT &&
       C == *C2 + APInt::getSignedMaxValue(Ty->getScalarSizeInBits()))
     return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, -(*C2)));
 
+  // (X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
+  if (Pred == CmpInst::ICMP_ULT &&
+      C == *C2 + APInt::getSignedMinValue(Ty->getScalarSizeInBits()))
+    return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantInt::get(Ty, ~(*C2)));
+
   // If the add does not wrap, we can always adjust the compare by subtracting
   // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
   // are canonicalized to SGT/SLT/UGT/ULT.

diff --git a/llvm/test/Transforms/InstCombine/icmp-add.ll b/llvm/test/Transforms/InstCombine/icmp-add.ll
index 1fbc6d169f10..8ad14004076f 100644
--- a/llvm/test/Transforms/InstCombine/icmp-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-add.ll
@@ -796,8 +796,7 @@ define i1 @ugt_wrong_offset(i8 %a) {
 
 define i1 @ult_offset(i8 %a) {
 ; CHECK-LABEL: @ult_offset(
-; CHECK-NEXT:    [[T:%.*]] = add i8 [[A:%.*]], -6
-; CHECK-NEXT:    [[OV:%.*]] = icmp ult i8 [[T]], 122
+; CHECK-NEXT:    [[OV:%.*]] = icmp sgt i8 [[A:%.*]], 5
 ; CHECK-NEXT:    ret i1 [[OV]]
 ;
   %t = add i8 %a, 250
@@ -809,7 +808,7 @@ define i1 @ult_offset_use(i32 %a) {
 ; CHECK-LABEL: @ult_offset_use(
 ; CHECK-NEXT:    [[T:%.*]] = add i32 [[A:%.*]], 42
 ; CHECK-NEXT:    call void @use(i32 [[T]])
-; CHECK-NEXT:    [[OV:%.*]] = icmp ult i32 [[T]], -2147483606
+; CHECK-NEXT:    [[OV:%.*]] = icmp sgt i32 [[A]], -43
 ; CHECK-NEXT:    ret i1 [[OV]]
 ;
   %t = add i32 %a, 42
@@ -820,8 +819,7 @@ define i1 @ult_offset_use(i32 %a) {
 
 define <2 x i1> @ult_offset_splat(<2 x i5> %a) {
 ; CHECK-LABEL: @ult_offset_splat(
-; CHECK-NEXT:    [[T:%.*]] = add <2 x i5> [[A:%.*]], <i5 9, i5 9>
-; CHECK-NEXT:    [[OV:%.*]] = icmp ult <2 x i5> [[T]], <i5 -7, i5 -7>
+; CHECK-NEXT:    [[OV:%.*]] = icmp sgt <2 x i5> [[A:%.*]], <i5 -10, i5 -10>
 ; CHECK-NEXT:    ret <2 x i1> [[OV]]
 ;
   %t = add <2 x i5> %a, <i5 9, i5 9>
@@ -829,6 +827,8 @@ define <2 x i1> @ult_offset_splat(<2 x i5> %a) {
   ret <2 x i1> %ov
 }
 
+; negative test - constants must differ by SMIN
+
 define i1 @ult_wrong_offset(i8 %a) {
 ; CHECK-LABEL: @ult_wrong_offset(
 ; CHECK-NEXT:    [[T:%.*]] = add i8 [[A:%.*]], -6


        


More information about the llvm-commits mailing list