[llvm] [InstCombine] Factorise Add and Min/Max using Distributivity (PR #101717)

Jorge Botto via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 2 10:26:44 PDT 2024


https://github.com/jf-botto updated https://github.com/llvm/llvm-project/pull/101717
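
For context, the fold introduced here rewrites a min/max of two adds that
share an operand into an add of a min/max, provided the adds carry the
no-wrap flag that makes the distribution sound (nuw for umax/umin, nsw for
smax/smin). A minimal sketch of the intended transform, based on the
updated checks in the test below:

  ; before
  %add1 = add nuw i8 %b, %a
  %add2 = add nuw i8 %c, %a
  %max  = call i8 @llvm.umax.i8(i8 %add1, i8 %add2)

  ; after
  %tmp = call i8 @llvm.umax.i8(i8 %b, i8 %c)
  %max = add nuw i8 %tmp, %a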

From 00f2c40c93e7fbd8105fa90b46fd57fb676f1e04 Mon Sep 17 00:00:00 2001
From: Jorge Botto <jorge.botto.16 at ucl.ac.uk>
Date: Fri, 2 Aug 2024 15:42:40 +0100
Subject: [PATCH 1/2] Precommit tests for factorising add and min/max

---
 .../InstCombine/intrinsic-distributive.ll     | 236 ++++++++++++++++++
 1 file changed, 236 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/intrinsic-distributive.ll

diff --git a/llvm/test/Transforms/InstCombine/intrinsic-distributive.ll b/llvm/test/Transforms/InstCombine/intrinsic-distributive.ll
new file mode 100644
index 0000000000000..9866decbbd314
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/intrinsic-distributive.ll
@@ -0,0 +1,236 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=instcombine < %s 2>&1 | FileCheck %s
+
+
+define i8 @umax_of_add_nuw(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @umax_of_add_nuw(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.umax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MAX]]
+;
+  %add1 = add nuw i8 %b, %a
+  %add2 = add nuw i8 %c, %a
+  %max = call i8 @llvm.umax.i8(i8 %add1, i8 %add2)
+  ret i8 %max
+}
+
+define i8 @umax_of_add_nuw_comm(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @umax_of_add_nuw_comm(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[A]], [[B]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[A]], [[C]]
+; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.umax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MAX]]
+;
+  %add1 = add nuw i8 %a, %b
+  %add2 = add nuw i8 %a, %c
+  %max = call i8 @llvm.umax.i8(i8 %add1, i8 %add2)
+  ret i8 %max
+}
+
+
+; negative test
+define i8 @umax_of_add_nsw(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @umax_of_add_nsw(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.umax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MAX]]
+;
+  %add1 = add nsw i8 %b, %a
+  %add2 = add nsw i8 %c, %a
+  %max = call i8 @llvm.umax.i8(i8 %add1, i8 %add2)
+  ret i8 %max
+}
+
+; negative test
+define i8 @umax_of_add(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @umax_of_add(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.umax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MAX]]
+;
+  %add1 = add i8 %b, %a
+  %add2 = add i8 %c, %a
+  %max = call i8 @llvm.umax.i8(i8 %add1, i8 %add2)
+  ret i8 %max
+}
+
+define i8 @umin_of_add_nuw(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @umin_of_add_nuw(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.umin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MIN]]
+;
+  %add1 = add nuw i8 %b, %a
+  %add2 = add nuw i8 %c, %a
+  %min = call i8 @llvm.umin.i8(i8 %add1, i8 %add2)
+  ret i8 %min
+}
+
+define i8 @umin_of_add_nuw_comm(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @umin_of_add_nuw_comm(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[A]], [[B]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[A]], [[C]]
+; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.umin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MIN]]
+;
+  %add1 = add nuw i8 %a, %b
+  %add2 = add nuw i8 %a, %c
+  %min = call i8 @llvm.umin.i8(i8 %add1, i8 %add2)
+  ret i8 %min
+}
+
+; negative test
+define i8 @umin_of_add_nsw(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @umin_of_add_nsw(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.umin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MIN]]
+;
+  %add1 = add nsw i8 %b, %a
+  %add2 = add nsw i8 %c, %a
+  %min = call i8 @llvm.umin.i8(i8 %add1, i8 %add2)
+  ret i8 %min
+}
+
+; negative test
+define i8 @umin_of_add(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @umin_of_add(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.umin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MIN]]
+;
+  %add1 = add i8 %b, %a
+  %add2 = add i8 %c, %a
+  %min = call i8 @llvm.umin.i8(i8 %add1, i8 %add2)
+  ret i8 %min
+}
+
+; negative test
+define i8 @smax_of_add_nuw(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @smax_of_add_nuw(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.smax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MAX]]
+;
+  %add1 = add nuw i8 %b, %a
+  %add2 = add nuw i8 %c, %a
+  %max = call i8 @llvm.smax.i8(i8 %add1, i8 %add2)
+  ret i8 %max
+}
+
+define i8 @smax_of_add_nsw(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @smax_of_add_nsw(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.smax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MAX]]
+;
+  %add1 = add nsw i8 %b, %a
+  %add2 = add nsw i8 %c, %a
+  %max = call i8 @llvm.smax.i8(i8 %add1, i8 %add2)
+  ret i8 %max
+}
+
+define i8 @smax_of_add_nsw_comm(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @smax_of_add_nsw_comm(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[A]], [[B]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[A]], [[C]]
+; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.smax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MAX]]
+;
+  %add1 = add nsw i8 %a, %b
+  %add2 = add nsw i8 %a, %c
+  %max = call i8 @llvm.smax.i8(i8 %add1, i8 %add2)
+  ret i8 %max
+}
+
+; negative test
+define i8 @smax_of_add(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @smax_of_add(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.smax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MAX]]
+;
+  %add1 = add i8 %b, %a
+  %add2 = add i8 %c, %a
+  %max = call i8 @llvm.smax.i8(i8 %add1, i8 %add2)
+  ret i8 %max
+}
+
+; negative test
+define i8 @smin_of_add_nuw(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @smin_of_add_nuw(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.smin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MIN]]
+;
+  %add1 = add nuw i8 %b, %a
+  %add2 = add nuw i8 %c, %a
+  %min = call i8 @llvm.smin.i8(i8 %add1, i8 %add2)
+  ret i8 %min
+}
+
+define i8 @smin_of_add_nsw(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @smin_of_add_nsw(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.smin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MIN]]
+;
+  %add1 = add nsw i8 %b, %a
+  %add2 = add nsw i8 %c, %a
+  %min = call i8 @llvm.smin.i8(i8 %add1, i8 %add2)
+  ret i8 %min
+}
+
+define i8 @smin_of_add_nsw_comm(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @smin_of_add_nsw_comm(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[A]], [[B]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[A]], [[C]]
+; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.smin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MIN]]
+;
+  %add1 = add nsw i8 %a, %b
+  %add2 = add nsw i8 %a, %c
+  %min = call i8 @llvm.smin.i8(i8 %add1, i8 %add2)
+  ret i8 %min
+}
+
+; negative test
+define i8 @smin_of_add(i8 %a, i8 %b, i8 %c) {
+; CHECK-LABEL: define i8 @smin_of_add(
+; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
+; CHECK-NEXT:    [[ADD1:%.*]] = add i8 [[B]], [[A]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add i8 [[C]], [[A]]
+; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.smin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    ret i8 [[MIN]]
+;
+  %add1 = add i8 %b, %a
+  %add2 = add i8 %c, %a
+  %min = call i8 @llvm.smin.i8(i8 %add1, i8 %add2)
+  ret i8 %min
+}
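
The negative tests above are negative because without the matching no-wrap
flag the add may wrap, and the distribution no longer holds. A worked i8
counterexample for umax (illustrative values, not taken from the patch):

  a = 200, b = 100, c = 1
  add1 = b + a = 44    (300 wraps mod 256)
  add2 = c + a = 201
  umax(add1, add2) = 201, but umax(b, c) + a = 100 + 200 = 44

With nuw, the wrapping inputs are poison and thus excluded, so factoring is
sound; the same reasoning with nsw covers smax/smin.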

From 6e53489c384ae3a36673ab3e8bc30680c057d784 Mon Sep 17 00:00:00 2001
From: Jorge Botto <jorge.botto.16 at ucl.ac.uk>
Date: Fri, 2 Aug 2024 18:12:35 +0100
Subject: [PATCH 2/2] Add missed optimisation: factorise add and min/max using
 distributivity

---
 llvm/include/llvm/IR/Operator.h               |  3 +
 .../InstCombine/InstCombineCalls.cpp          | 94 +++++++++++++++++++
 .../InstCombine/intrinsic-distributive.ll     | 40 ++++----
 3 files changed, 113 insertions(+), 24 deletions(-)

diff --git a/llvm/include/llvm/IR/Operator.h b/llvm/include/llvm/IR/Operator.h
index f63f54ef94107..ec8b3f4b6318f 100644
--- a/llvm/include/llvm/IR/Operator.h
+++ b/llvm/include/llvm/IR/Operator.h
@@ -123,6 +123,9 @@ class OverflowingBinaryOperator : public Operator {
     return NoWrapKind;
   }
 
+  /// Return true if the instruction is commutative.
+  bool isCommutative() const { return Instruction::isCommutative(getOpcode()); }
+
   static bool classof(const Instruction *I) {
     return I->getOpcode() == Instruction::Add ||
            I->getOpcode() == Instruction::Sub ||
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index bf7c91bf36306..2768cb86c3c42 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1505,6 +1505,97 @@ foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1,
       ConstantInt::getTrue(ZeroUndef->getType()));
 }
 
+/// Return whether "X LOp (Y ROp Z)" is always equal to
+/// "(X LOp Y) ROp (X LOp Z)".
+static bool leftDistributesOverRightIntrinsic(Instruction::BinaryOps LOp,
+                                              bool hasNUW, bool hasNSW,
+                                              Intrinsic::ID ROp) {
+  switch (ROp) {
+  case Intrinsic::umax:
+    return hasNUW && LOp == Instruction::Add;
+  case Intrinsic::umin:
+    return hasNUW && LOp == Instruction::Add;
+  case Intrinsic::smax:
+    return hasNSW && LOp == Instruction::Add;
+  case Intrinsic::smin:
+    return hasNSW && LOp == Instruction::Add;
+  default:
+    return false;
+  }
+}
+
+// Attempts to factorise a common term out of an expression of the form
+// "(A op' B) op (C op' D)", where op is a min/max intrinsic and op' is an
+// overflowing binary operator.
+static Value *
+foldIntrinsicUsingDistributiveLaws(IntrinsicInst *II,
+                                   InstCombiner::BuilderTy &Builder) {
+  Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
+  Intrinsic::ID TopLevelOpcode = II->getIntrinsicID();
+
+  OverflowingBinaryOperator *Op0 = dyn_cast<OverflowingBinaryOperator>(LHS);
+  OverflowingBinaryOperator *Op1 = dyn_cast<OverflowingBinaryOperator>(RHS);
+
+  if (!Op0 || !Op1)
+    return nullptr;
+
+  if (Op0->getOpcode() != Op1->getOpcode())
+    return nullptr;
+
+  if (Op0->hasNoUnsignedWrap() != Op1->hasNoUnsignedWrap() ||
+      Op0->hasNoSignedWrap() != Op1->hasNoSignedWrap())
+    return nullptr;
+
+  if (!Op0->hasOneUse() || !Op1->hasOneUse())
+    return nullptr;
+
+  Instruction::BinaryOps InnerOpcode =
+      static_cast<Instruction::BinaryOps>(Op0->getOpcode());
+  bool HasNUW = Op0->hasNoUnsignedWrap();
+  bool HasNSW = Op0->hasNoSignedWrap();
+
+  if (!InnerOpcode)
+    return nullptr;
+
+  if (!leftDistributesOverRightIntrinsic(InnerOpcode, HasNUW, HasNSW,
+                                         TopLevelOpcode))
+    return nullptr;
+
+  assert(II->isCommutative() && Op0->isCommutative() &&
+         "Only inner and outer commutative op codes are supported.");
+
+  Value *A = Op0->getOperand(0);
+  Value *B = Op0->getOperand(1);
+  Value *C = Op1->getOperand(0);
+  Value *D = Op1->getOperand(1);
+
+  if (A == C || A == D) {
+    if (A != C)
+      std::swap(C, D);
+
+    Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, B, D);
+    BinaryOperator *NewBinop =
+        cast<BinaryOperator>(Builder.CreateBinOp(InnerOpcode, NewIntrinsic, A));
+    NewBinop->setHasNoSignedWrap(HasNSW);
+    NewBinop->setHasNoUnsignedWrap(HasNUW);
+    return NewBinop;
+  }
+
+  if (B == D || B == C) {
+    if (B != D)
+      std::swap(C, D);
+
+    Value *NewIntrinsic = Builder.CreateBinaryIntrinsic(TopLevelOpcode, A, C);
+    BinaryOperator *NewBinop =
+        cast<BinaryOperator>(Builder.CreateBinOp(InnerOpcode, NewIntrinsic, B));
+    NewBinop->setHasNoSignedWrap(HasNSW);
+    NewBinop->setHasNoUnsignedWrap(HasNUW);
+    return NewBinop;
+  }
+
+  return nullptr;
+}
+
 /// CallInst simplification. This mostly only handles folding of intrinsic
 /// instructions. For normal calls, it allows visitCallBase to do the heavy
 /// lifting.
@@ -1929,6 +2020,9 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
       }
     }
 
+    if (Value *V = foldIntrinsicUsingDistributiveLaws(II, Builder))
+      return replaceInstUsesWith(*II, V);
+
     break;
   }
   case Intrinsic::bitreverse: {
diff --git a/llvm/test/Transforms/InstCombine/intrinsic-distributive.ll b/llvm/test/Transforms/InstCombine/intrinsic-distributive.ll
index 9866decbbd314..f58ce04cb6711 100644
--- a/llvm/test/Transforms/InstCombine/intrinsic-distributive.ll
+++ b/llvm/test/Transforms/InstCombine/intrinsic-distributive.ll
@@ -5,9 +5,8 @@
 define i8 @umax_of_add_nuw(i8 %a, i8 %b, i8 %c) {
 ; CHECK-LABEL: define i8 @umax_of_add_nuw(
 ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
-; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[B]], [[A]]
-; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[C]], [[A]]
-; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.umax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.umax.i8(i8 [[B]], i8 [[C]])
+; CHECK-NEXT:    [[MAX:%.*]] = add nuw i8 [[TMP1]], [[A]]
 ; CHECK-NEXT:    ret i8 [[MAX]]
 ;
   %add1 = add nuw i8 %b, %a
@@ -19,9 +18,8 @@ define i8 @umax_of_add_nuw(i8 %a, i8 %b, i8 %c) {
 define i8 @umax_of_add_nuw_comm(i8 %a, i8 %b, i8 %c) {
 ; CHECK-LABEL: define i8 @umax_of_add_nuw_comm(
 ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
-; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[A]], [[B]]
-; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[A]], [[C]]
-; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.umax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.umax.i8(i8 [[B]], i8 [[C]])
+; CHECK-NEXT:    [[MAX:%.*]] = add nuw i8 [[TMP1]], [[A]]
 ; CHECK-NEXT:    ret i8 [[MAX]]
 ;
   %add1 = add nuw i8 %a, %b
@@ -64,9 +62,8 @@ define i8 @umax_of_add(i8 %a, i8 %b, i8 %c) {
 define i8 @umin_of_add_nuw(i8 %a, i8 %b, i8 %c) {
 ; CHECK-LABEL: define i8 @umin_of_add_nuw(
 ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
-; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[B]], [[A]]
-; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[C]], [[A]]
-; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.umin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.umin.i8(i8 [[B]], i8 [[C]])
+; CHECK-NEXT:    [[MIN:%.*]] = add nuw i8 [[TMP1]], [[A]]
 ; CHECK-NEXT:    ret i8 [[MIN]]
 ;
   %add1 = add nuw i8 %b, %a
@@ -78,9 +75,8 @@ define i8 @umin_of_add_nuw(i8 %a, i8 %b, i8 %c) {
 define i8 @umin_of_add_nuw_comm(i8 %a, i8 %b, i8 %c) {
 ; CHECK-LABEL: define i8 @umin_of_add_nuw_comm(
 ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
-; CHECK-NEXT:    [[ADD1:%.*]] = add nuw i8 [[A]], [[B]]
-; CHECK-NEXT:    [[ADD2:%.*]] = add nuw i8 [[A]], [[C]]
-; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.umin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.umin.i8(i8 [[B]], i8 [[C]])
+; CHECK-NEXT:    [[MIN:%.*]] = add nuw i8 [[TMP1]], [[A]]
 ; CHECK-NEXT:    ret i8 [[MIN]]
 ;
   %add1 = add nuw i8 %a, %b
@@ -137,9 +133,8 @@ define i8 @smax_of_add_nuw(i8 %a, i8 %b, i8 %c) {
 define i8 @smax_of_add_nsw(i8 %a, i8 %b, i8 %c) {
 ; CHECK-LABEL: define i8 @smax_of_add_nsw(
 ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
-; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[B]], [[A]]
-; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[C]], [[A]]
-; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.smax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.smax.i8(i8 [[B]], i8 [[C]])
+; CHECK-NEXT:    [[MAX:%.*]] = add nsw i8 [[TMP1]], [[A]]
 ; CHECK-NEXT:    ret i8 [[MAX]]
 ;
   %add1 = add nsw i8 %b, %a
@@ -151,9 +146,8 @@ define i8 @smax_of_add_nsw(i8 %a, i8 %b, i8 %c) {
 define i8 @smax_of_add_nsw_comm(i8 %a, i8 %b, i8 %c) {
 ; CHECK-LABEL: define i8 @smax_of_add_nsw_comm(
 ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
-; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[A]], [[B]]
-; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[A]], [[C]]
-; CHECK-NEXT:    [[MAX:%.*]] = call i8 @llvm.smax.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.smax.i8(i8 [[B]], i8 [[C]])
+; CHECK-NEXT:    [[MAX:%.*]] = add nsw i8 [[TMP1]], [[A]]
 ; CHECK-NEXT:    ret i8 [[MAX]]
 ;
   %add1 = add nsw i8 %a, %b
@@ -195,9 +189,8 @@ define i8 @smin_of_add_nuw(i8 %a, i8 %b, i8 %c) {
 define i8 @smin_of_add_nsw(i8 %a, i8 %b, i8 %c) {
 ; CHECK-LABEL: define i8 @smin_of_add_nsw(
 ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
-; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[B]], [[A]]
-; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[C]], [[A]]
-; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.smin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.smin.i8(i8 [[B]], i8 [[C]])
+; CHECK-NEXT:    [[MIN:%.*]] = add nsw i8 [[TMP1]], [[A]]
 ; CHECK-NEXT:    ret i8 [[MIN]]
 ;
   %add1 = add nsw i8 %b, %a
@@ -209,9 +202,8 @@ define i8 @smin_of_add_nsw(i8 %a, i8 %b, i8 %c) {
 define i8 @smin_of_add_nsw_comm(i8 %a, i8 %b, i8 %c) {
 ; CHECK-LABEL: define i8 @smin_of_add_nsw_comm(
 ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]]) {
-; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i8 [[A]], [[B]]
-; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i8 [[A]], [[C]]
-; CHECK-NEXT:    [[MIN:%.*]] = call i8 @llvm.smin.i8(i8 [[ADD1]], i8 [[ADD2]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.smin.i8(i8 [[B]], i8 [[C]])
+; CHECK-NEXT:    [[MIN:%.*]] = add nsw i8 [[TMP1]], [[A]]
 ; CHECK-NEXT:    ret i8 [[MIN]]
 ;
   %add1 = add nsw i8 %a, %b


