[llvm] r356584 - [InstCombine] Fold add nuw + uadd.with.overflow
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 20 11:00:27 PDT 2019
Author: nikic
Date: Wed Mar 20 11:00:27 2019
New Revision: 356584
URL: http://llvm.org/viewvc/llvm-project?rev=356584&view=rev
Log:
[InstCombine] Fold add nuw + uadd.with.overflow
Fold an `add nuw` feeding `uadd.with.overflow`, when both constant
operands are known, provided their sum does not overflow.
Part of https://bugs.llvm.org/show_bug.cgi?id=38146.
Patch by Dan Robertson.
Differential Revision: https://reviews.llvm.org/D59471
Modified:
llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/trunk/test/Transforms/InstCombine/uadd-with-overflow.ll
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp?rev=356584&r1=356583&r2=356584&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp Wed Mar 20 11:00:27 2019
@@ -2063,6 +2063,7 @@ Instruction *InstCombiner::visitCallInst
return &CI;
break;
}
+ case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow: {
if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
return I;
@@ -2070,25 +2071,27 @@ Instruction *InstCombiner::visitCallInst
return I;
// Given 2 constant operands whose sum does not overflow:
+ // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
// saddo (X +nsw C0), C1 -> saddo X, C0 + C1
Value *X;
const APInt *C0, *C1;
Value *Arg0 = II->getArgOperand(0);
Value *Arg1 = II->getArgOperand(1);
- if (match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0))) &&
- match(Arg1, m_APInt(C1))) {
+ bool IsSigned = II->getIntrinsicID() == Intrinsic::sadd_with_overflow;
+ bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
+ : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
+ if (HasNWAdd && match(Arg1, m_APInt(C1))) {
bool Overflow;
- APInt NewC = C1->sadd_ov(*C0, Overflow);
+ APInt NewC =
+ IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
if (!Overflow)
return replaceInstUsesWith(
*II, Builder.CreateBinaryIntrinsic(
- Intrinsic::sadd_with_overflow, X,
+ II->getIntrinsicID(), X,
ConstantInt::get(Arg1->getType(), NewC)));
}
-
break;
}
- case Intrinsic::uadd_with_overflow:
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
if (Instruction *I = canonicalizeConstantArg0ToArg1(CI))
Modified: llvm/trunk/test/Transforms/InstCombine/uadd-with-overflow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/uadd-with-overflow.ll?rev=356584&r1=356583&r2=356584&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/uadd-with-overflow.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/uadd-with-overflow.ll Wed Mar 20 11:00:27 2019
@@ -11,9 +11,8 @@ declare { i8, i1 } @llvm.uadd.with.overf
define { i32, i1 } @simple_fold(i32 %x) {
; CHECK-LABEL: @simple_fold(
-; CHECK-NEXT: [[A:%.*]] = add nuw i32 [[X:%.*]], 7
-; CHECK-NEXT: [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 13)
-; CHECK-NEXT: ret { i32, i1 } [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 20)
+; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
;
%a = add nuw i32 %x, 7
%b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 13)
@@ -22,9 +21,8 @@ define { i32, i1 } @simple_fold(i32 %x)
define { i8, i1 } @fold_on_constant_add_no_overflow(i8 %x) {
; CHECK-LABEL: @fold_on_constant_add_no_overflow(
-; CHECK-NEXT: [[A:%.*]] = add nuw i8 [[X:%.*]], -56
-; CHECK-NEXT: [[B:%.*]] = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[A]], i8 55)
-; CHECK-NEXT: ret { i8, i1 } [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[X:%.*]], i8 -1)
+; CHECK-NEXT: ret { i8, i1 } [[TMP1]]
;
%a = add nuw i8 %x, 200
%b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 55)
@@ -65,9 +63,8 @@ define { <2 x i8>, <2 x i1> } @no_fold_v
define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant(<2 x i32> %x) {
; CHECK-LABEL: @fold_simple_splat_constant(
-; CHECK-NEXT: [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], <i32 12, i32 12>
-; CHECK-NEXT: [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
-; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[X:%.*]], <2 x i32> <i32 42, i32 42>)
+; CHECK-NEXT: ret { <2 x i32>, <2 x i1> } [[TMP1]]
;
%a = add nuw <2 x i32> %x, <i32 12, i32 12>
%b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
@@ -98,9 +95,8 @@ define { <2 x i32>, <2 x i1> } @no_fold_
define { i32, i1 } @fold_nuwnsw(i32 %x) {
; CHECK-LABEL: @fold_nuwnsw(
-; CHECK-NEXT: [[A:%.*]] = add nuw nsw i32 [[X:%.*]], 12
-; CHECK-NEXT: [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
-; CHECK-NEXT: ret { i32, i1 } [[B]]
+; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
+; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
;
%a = add nuw nsw i32 %x, 12
%b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
More information about the llvm-commits
mailing list