[llvm] r358099 - [InstCombine] ssubo X, C -> saddo X, -C

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 10 09:27:36 PDT 2019


Author: nikic
Date: Wed Apr 10 09:27:36 2019
New Revision: 358099

URL: http://llvm.org/viewvc/llvm-project?rev=358099&view=rev
Log:
[InstCombine] ssubo X, C -> saddo X, -C

ssubo X, C is equivalent to saddo X, -C whenever C is not the minimum
signed value for its bit width (negating the minimum signed value would
overflow). Make the transformation in InstCombine and allow the logic
implemented for saddo to fold prior usages of add nsw or sub nsw with
constants.

Patch by Dan Robertson.

Differential Revision: https://reviews.llvm.org/D60061

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
    llvm/trunk/test/Transforms/InstCombine/ssub-with-overflow.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp?rev=358099&r1=358098&r2=358099&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp Wed Apr 10 09:27:36 2019
@@ -2125,6 +2125,7 @@ Instruction *InstCombiner::visitCallInst
     }
     break;
   }
+
   case Intrinsic::umul_with_overflow:
   case Intrinsic::smul_with_overflow:
     if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
@@ -2132,10 +2133,30 @@ Instruction *InstCombiner::visitCallInst
     LLVM_FALLTHROUGH;
 
   case Intrinsic::usub_with_overflow:
+    if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
+      return I;
+    break;
+
   case Intrinsic::ssub_with_overflow: {
     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
       return I;
 
+    Constant *C;
+    Value *Arg0 = II->getArgOperand(0);
+    Value *Arg1 = II->getArgOperand(1);
+    // Given a constant C that is not the minimum signed value
+    // for an integer of a given bit width:
+    //
+    // ssubo X, C -> saddo X, -C
+    if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
+      Value *NegVal = ConstantExpr::getNeg(C);
+      // Build a saddo call that is equivalent to the discovered
+      // ssubo call.
+      return replaceInstUsesWith(
+          *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
+                                             Arg0, NegVal));
+    }
+
     break;
   }
 

Modified: llvm/trunk/test/Transforms/InstCombine/ssub-with-overflow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/ssub-with-overflow.ll?rev=358099&r1=358098&r2=358099&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/ssub-with-overflow.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/ssub-with-overflow.ll Wed Apr 10 09:27:36 2019
@@ -11,9 +11,8 @@ declare { i8, i1 } @llvm.ssub.with.overf
 
 define { i32, i1 } @simple_fold(i32 %x) {
 ; CHECK-LABEL: @simple_fold(
-; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[X:%.*]], -7
-; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[A]], i32 13)
-; CHECK-NEXT:    ret { i32, i1 } [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 -20)
+; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
 ;
   %a = sub nsw i32 %x, 7
   %b = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 13)
@@ -33,9 +32,8 @@ define { i32, i1 } @fold_mixed_signs(i32
 
 define { i8, i1 } @fold_on_constant_sub_no_overflow(i8 %x) {
 ; CHECK-LABEL: @fold_on_constant_sub_no_overflow(
-; CHECK-NEXT:    [[A:%.*]] = add nsw i8 [[X:%.*]], -100
-; CHECK-NEXT:    [[B:%.*]] = tail call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 [[A]], i8 28)
-; CHECK-NEXT:    ret { i8, i1 } [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[X:%.*]], i8 -128)
+; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
 ;
   %a = sub nsw i8 %x, 100
   %b = tail call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 %a, i8 28)
@@ -45,8 +43,8 @@ define { i8, i1 } @fold_on_constant_sub_
 define { i8, i1 } @no_fold_on_constant_sub_overflow(i8 %x) {
 ; CHECK-LABEL: @no_fold_on_constant_sub_overflow(
 ; CHECK-NEXT:    [[A:%.*]] = add nsw i8 [[X:%.*]], -100
-; CHECK-NEXT:    [[B:%.*]] = tail call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 [[A]], i8 29)
-; CHECK-NEXT:    ret { i8, i1 } [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 [[A]], i8 -29)
+; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
 ;
   %a = sub nsw i8 %x, 100
   %b = tail call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 %a, i8 29)
@@ -55,9 +53,8 @@ define { i8, i1 } @no_fold_on_constant_s
 
 define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant(<2 x i32> %x) {
 ; CHECK-LABEL: @fold_simple_splat_constant(
-; CHECK-NEXT:    [[A:%.*]] = add nsw <2 x i32> [[X:%.*]], <i32 -12, i32 -12>
-; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
-; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 -42, i32 -42>)
+; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[TMP1]]
 ;
   %a = sub nsw <2 x i32> %x, <i32 12, i32 12>
   %b = tail call { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
@@ -67,8 +64,8 @@ define { <2 x i32>, <2 x i1> } @fold_sim
 define { <2 x i32>, <2 x i1> } @no_fold_splat_undef_constant(<2 x i32> %x) {
 ; CHECK-LABEL: @no_fold_splat_undef_constant(
 ; CHECK-NEXT:    [[A:%.*]] = add <2 x i32> [[X:%.*]], <i32 -12, i32 undef>
-; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
-; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 -30, i32 -30>)
+; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[TMP1]]
 ;
   %a = sub nsw <2 x i32> %x, <i32 12, i32 undef>
   %b = tail call { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
@@ -78,8 +75,8 @@ define { <2 x i32>, <2 x i1> } @no_fold_
 define { <2 x i32>, <2 x i1> } @no_fold_splat_not_constant(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @no_fold_splat_not_constant(
 ; CHECK-NEXT:    [[A:%.*]] = sub nsw <2 x i32> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
-; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 -30, i32 -30>)
+; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[TMP1]]
 ;
   %a = sub nsw <2 x i32> %x, %y
   %b = tail call { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
@@ -88,9 +85,8 @@ define { <2 x i32>, <2 x i1> } @no_fold_
 
 define { i32, i1 } @fold_nuwnsw(i32 %x) {
 ; CHECK-LABEL: @fold_nuwnsw(
-; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[X:%.*]], -12
-; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[A]], i32 30)
-; CHECK-NEXT:    ret { i32, i1 } [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 -42)
+; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
 ;
   %a = sub nuw nsw i32 %x, 12
   %b = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 30)
@@ -100,8 +96,8 @@ define { i32, i1 } @fold_nuwnsw(i32 %x)
 define { i32, i1 } @no_fold_nuw(i32 %x) {
 ; CHECK-LABEL: @no_fold_nuw(
 ; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], -12
-; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[A]], i32 30)
-; CHECK-NEXT:    ret { i32, i1 } [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A]], i32 -30)
+; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
 ;
   %a = sub nuw i32 %x, 12
   %b = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 30)
@@ -121,9 +117,8 @@ define { i32, i1 } @no_fold_wrapped_sub(
 
 define { i32, i1 } @fold_add_simple(i32 %x) {
 ; CHECK-LABEL: @fold_add_simple(
-; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[X:%.*]], -12
-; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[A]], i32 30)
-; CHECK-NEXT:    ret { i32, i1 } [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 -42)
+; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
 ;
   %a = add nsw i32 %x, -12
   %b = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 30)
@@ -141,8 +136,8 @@ define { <2 x i32>, <2 x i1> } @keep_ssu
 
 define { <2 x i32>, <2 x i1> } @keep_ssubo_non_splat(<2 x i32> %x) {
 ; CHECK-LABEL: @keep_ssubo_non_splat(
-; CHECK-NEXT:    [[A:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 30, i32 31>)
-; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[A]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 -30, i32 -31>)
+; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[TMP1]]
 ;
   %a = tail call { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32> %x, <2 x i32> <i32 30, i32 31>)
   ret { <2 x i32>, <2 x i1> } %a




More information about the llvm-commits mailing list