[llvm] 44e5afd - [InstCombine] Generalize foldICmpWithMinMax
Yingwei Zheng via llvm-commits
llvm-commits at lists.llvm.org
Sun Sep 10 11:27:49 PDT 2023
Author: Yingwei Zheng
Date: 2023-09-11T02:26:48+08:00
New Revision: 44e5afdb91b92dedfbd3100a3e73dab27de1c9cf
URL: https://github.com/llvm/llvm-project/commit/44e5afdb91b92dedfbd3100a3e73dab27de1c9cf
DIFF: https://github.com/llvm/llvm-project/commit/44e5afdb91b92dedfbd3100a3e73dab27de1c9cf.diff
LOG: [InstCombine] Generalize foldICmpWithMinMax
This patch generalizes the fold of `icmp pred min/max(X, Y), Z` to address the issue https://github.com/llvm/llvm-project/issues/62898.
For example, `smin(X, Y) < Z` can be folded into `X < Z` when `Y > Z` is known from constant folding, loop invariants, or dominating conditions.
Alive2 proof (run with `--disable-undef-input` to work around the `--smt-to=10000` solver-timeout limit): https://alive2.llvm.org/ce/z/rB7qLc
You can also verify these transformations locally with the standalone translation-validation tool `alive-tv`:
```
alive-tv transforms.ll --smt-to=600000 --exit-on-error
```
Reviewed By: goldstein.w.n
Differential Revision: https://reviews.llvm.org/D156238
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/lib/Transforms/InstCombine/InstCombineInternal.h
llvm/test/Transforms/InstCombine/smax-icmp.ll
llvm/test/Transforms/InstCombine/smin-icmp.ll
llvm/test/Transforms/InstCombine/umax-icmp.ll
llvm/test/Transforms/InstCombine/umin-icmp.ll
llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 9fdc46fec631679..e1b879b4c39016c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4964,88 +4964,135 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
return nullptr;
}
-/// Fold icmp Pred min|max(X, Y), X.
-static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
- ICmpInst::Predicate Pred = Cmp.getPredicate();
- Value *Op0 = Cmp.getOperand(0);
- Value *X = Cmp.getOperand(1);
-
- // Canonicalize minimum or maximum operand to LHS of the icmp.
- if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
- match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
- match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
- match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
- std::swap(Op0, X);
- Pred = Cmp.getSwappedPredicate();
- }
-
- Value *Y;
- if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
- // smin(X, Y) == X --> X s<= Y
- // smin(X, Y) s>= X --> X s<= Y
- if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
- return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
-
- // smin(X, Y) != X --> X s> Y
- // smin(X, Y) s< X --> X s> Y
- if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
- return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
-
- // These cases should be handled in InstSimplify:
- // smin(X, Y) s<= X --> true
- // smin(X, Y) s> X --> false
+/// Fold icmp Pred min|max(X, Y), Z.
+Instruction *
+InstCombinerImpl::foldICmpWithMinMaxImpl(Instruction &I,
+ MinMaxIntrinsic *MinMax, Value *Z,
+ ICmpInst::Predicate Pred) {
+ Value *X = MinMax->getLHS();
+ Value *Y = MinMax->getRHS();
+ if (ICmpInst::isSigned(Pred) && !MinMax->isSigned())
return nullptr;
- }
-
- if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
- // smax(X, Y) == X --> X s>= Y
- // smax(X, Y) s<= X --> X s>= Y
- if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
- return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
-
- // smax(X, Y) != X --> X s< Y
- // smax(X, Y) s> X --> X s< Y
- if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
- return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
-
- // These cases should be handled in InstSimplify:
- // smax(X, Y) s>= X --> true
- // smax(X, Y) s< X --> false
+ if (ICmpInst::isUnsigned(Pred) && MinMax->isSigned())
+ return nullptr;
+ SimplifyQuery Q = SQ.getWithInstruction(&I);
+ auto IsCondKnownTrue = [](Value *Val) -> std::optional<bool> {
+ if (!Val)
+ return std::nullopt;
+ if (match(Val, m_One()))
+ return true;
+ if (match(Val, m_Zero()))
+ return false;
+ return std::nullopt;
+ };
+ auto CmpXZ = IsCondKnownTrue(simplifyICmpInst(Pred, X, Z, Q));
+ auto CmpYZ = IsCondKnownTrue(simplifyICmpInst(Pred, Y, Z, Q));
+ if (!CmpXZ.has_value() && !CmpYZ.has_value())
return nullptr;
+ if (!CmpXZ.has_value()) {
+ std::swap(X, Y);
+ std::swap(CmpXZ, CmpYZ);
}
- if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
- // umin(X, Y) == X --> X u<= Y
- // umin(X, Y) u>= X --> X u<= Y
- if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
- return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
-
- // umin(X, Y) != X --> X u> Y
- // umin(X, Y) u< X --> X u> Y
- if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
- return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
+ switch (Pred) {
+ case ICmpInst::ICMP_EQ:
+ case ICmpInst::ICMP_NE: {
+ // If X == Z:
+ // Expr Result
+ // min(X, Y) == Z X <= Y
+ // max(X, Y) == Z X >= Y
+ // min(X, Y) != Z X > Y
+ // max(X, Y) != Z X < Y
+ if ((Pred == ICmpInst::ICMP_EQ) == *CmpXZ) {
+ ICmpInst::Predicate NewPred =
+ ICmpInst::getNonStrictPredicate(MinMax->getPredicate());
+ if (Pred == ICmpInst::ICMP_NE)
+ NewPred = ICmpInst::getInversePredicate(NewPred);
+ return ICmpInst::Create(Instruction::ICmp, NewPred, X, Y);
+ }
+ // Otherwise (X != Z, nofold):
+ // Expr Result
+ // min(X, Y) == Z X > Y || Y == Z
+ // max(X, Y) == Z X < Y || Y == Z
+ // min(X, Y) != Z X <= Y && Y != Z
+ // max(X, Y) != Z X >= Y && Y != Z
+ break;
+ }
+ case ICmpInst::ICMP_SLT:
+ case ICmpInst::ICMP_ULT:
+ case ICmpInst::ICMP_SLE:
+ case ICmpInst::ICMP_ULE:
+ case ICmpInst::ICMP_SGT:
+ case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_SGE:
+ case ICmpInst::ICMP_UGE: {
+ auto FoldIntoConstant = [&](bool Value) {
+ return replaceInstUsesWith(
+ I, Constant::getIntegerValue(
+ I.getType(), APInt(1U, static_cast<uint64_t>(Value))));
+ };
+ auto FoldIntoCmpYZ = [&]() -> Instruction * {
+ if (CmpYZ.has_value())
+ return FoldIntoConstant(*CmpYZ);
+ return ICmpInst::Create(Instruction::ICmp, Pred, Y, Z);
+ };
- // These cases should be handled in InstSimplify:
- // umin(X, Y) u<= X --> true
- // umin(X, Y) u> X --> false
- return nullptr;
+ bool IsSame = MinMax->getPredicate() == ICmpInst::getStrictPredicate(Pred);
+ if (*CmpXZ) {
+ if (IsSame) {
+ // Expr Fact Result
+ // min(X, Y) < Z X < Z true
+ // min(X, Y) <= Z X <= Z true
+ // max(X, Y) > Z X > Z true
+ // max(X, Y) >= Z X >= Z true
+ return FoldIntoConstant(true);
+ } else {
+ // Expr Fact Result
+ // max(X, Y) < Z X < Z Y < Z
+ // max(X, Y) <= Z X <= Z Y <= Z
+ // min(X, Y) > Z X > Z Y > Z
+ // min(X, Y) >= Z X >= Z Y >= Z
+ return FoldIntoCmpYZ();
+ }
+ } else {
+ if (IsSame) {
+ // Expr Fact Result
+ // min(X, Y) < Z X >= Z Y < Z
+ // min(X, Y) <= Z X > Z Y <= Z
+ // max(X, Y) > Z X <= Z Y > Z
+ // max(X, Y) >= Z X < Z Y >= Z
+ return FoldIntoCmpYZ();
+ } else {
+ // Expr Fact Result
+ // max(X, Y) < Z X >= Z false
+ // max(X, Y) <= Z X > Z false
+ // min(X, Y) > Z X <= Z false
+ // min(X, Y) >= Z X < Z false
+ return FoldIntoConstant(false);
+ }
+ }
+ break;
+ }
+ default:
+ break;
}
- if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
- // umax(X, Y) == X --> X u>= Y
- // umax(X, Y) u<= X --> X u>= Y
- if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
- return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
+ return nullptr;
+}
+Instruction *InstCombinerImpl::foldICmpWithMinMax(ICmpInst &Cmp) {
+ ICmpInst::Predicate Pred = Cmp.getPredicate();
+ Value *Lhs = Cmp.getOperand(0);
+ Value *Rhs = Cmp.getOperand(1);
- // umax(X, Y) != X --> X u< Y
- // umax(X, Y) u> X --> X u< Y
- if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
- return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
+ if (MinMaxIntrinsic *MinMax = dyn_cast<MinMaxIntrinsic>(Lhs)) {
+ if (Instruction *Res = foldICmpWithMinMaxImpl(Cmp, MinMax, Rhs, Pred))
+ return Res;
+ }
- // These cases should be handled in InstSimplify:
- // umax(X, Y) u>= X --> true
- // umax(X, Y) u< X --> false
- return nullptr;
+ if (MinMaxIntrinsic *MinMax = dyn_cast<MinMaxIntrinsic>(Rhs)) {
+ if (Instruction *Res = foldICmpWithMinMaxImpl(
+ Cmp, MinMax, Lhs, ICmpInst::getSwappedPredicate(Pred)))
+ return Res;
}
return nullptr;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index e9e8e90d4802bec..e742564c1300acc 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -611,6 +611,9 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
Instruction *foldICmpInstWithConstantAllowUndef(ICmpInst &Cmp,
const APInt &C);
Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
+ Instruction *foldICmpWithMinMaxImpl(Instruction &I, MinMaxIntrinsic *MinMax,
+ Value *Z, ICmpInst::Predicate Pred);
+ Instruction *foldICmpWithMinMax(ICmpInst &Cmp);
Instruction *foldICmpEquality(ICmpInst &Cmp);
Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
Instruction *foldSignBitTest(ICmpInst &I);
diff --git a/llvm/test/Transforms/InstCombine/smax-icmp.ll b/llvm/test/Transforms/InstCombine/smax-icmp.ll
index 9cf64a9d803d664..f8df92d5470e19d 100644
--- a/llvm/test/Transforms/InstCombine/smax-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/smax-icmp.ll
@@ -64,11 +64,11 @@ define i1 @eq_smax4(i32 %a, i32 %y) {
ret i1 %cmp2
}
-; smax(X, Y) <= X --> X >= Y
+; smax(X, Y) <= X --> Y <= X
define i1 @sle_smax1(i32 %x, i32 %y) {
; CHECK-LABEL: @sle_smax1(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp sgt i32 %x, %y
@@ -81,7 +81,7 @@ define i1 @sle_smax1(i32 %x, i32 %y) {
define i1 @sle_smax2(i32 %x, i32 %y) {
; CHECK-LABEL: @sle_smax2(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp sgt i32 %y, %x
@@ -176,11 +176,11 @@ define i1 @ne_smax4(i32 %a, i32 %y) {
ret i1 %cmp2
}
-; smax(X, Y) > X --> X < Y
+; smax(X, Y) > X --> Y > X
define i1 @sgt_smax1(i32 %x, i32 %y) {
; CHECK-LABEL: @sgt_smax1(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp sgt i32 %x, %y
@@ -193,7 +193,7 @@ define i1 @sgt_smax1(i32 %x, i32 %y) {
define i1 @sgt_smax2(i32 %x, i32 %y) {
; CHECK-LABEL: @sgt_smax2(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp sgt i32 %y, %x
@@ -240,14 +240,12 @@ define void @eq_smax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP4]])
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
@@ -256,9 +254,9 @@ define void @eq_smax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP7]])
; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
-; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP9:%.*]] = icmp sge i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP10:%.*]] = icmp slt i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP10]])
; CHECK-NEXT: ret void
; CHECK: end:
@@ -299,14 +297,12 @@ define void @eq_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP4]])
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
@@ -315,9 +311,9 @@ define void @eq_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP7]])
; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
-; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP9:%.*]] = icmp sge i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP10:%.*]] = icmp slt i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP10]])
; CHECK-NEXT: ret void
; CHECK: end:
@@ -358,13 +354,13 @@ define void @slt_smax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
@@ -417,13 +413,13 @@ define void @slt_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
@@ -478,9 +474,9 @@ define void @sle_smax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
@@ -537,9 +533,9 @@ define void @sle_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
@@ -594,14 +590,10 @@ define void @sgt_smax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP4]])
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
@@ -653,14 +645,10 @@ define void @sgt_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP4]])
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
@@ -712,14 +700,12 @@ define void @sge_smax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP1]])
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP4]])
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
@@ -771,14 +757,12 @@ define void @sge_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP1]])
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP4]])
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
diff --git a/llvm/test/Transforms/InstCombine/smin-icmp.ll b/llvm/test/Transforms/InstCombine/smin-icmp.ll
index 69ff8414df2a430..244dad451022ea6 100644
--- a/llvm/test/Transforms/InstCombine/smin-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/smin-icmp.ll
@@ -63,11 +63,11 @@ define i1 @eq_smin4(i32 %a, i32 %y) {
ret i1 %cmp2
}
-; smin(X, Y) >= X --> X <= Y
+; smin(X, Y) >= X --> Y >= X
define i1 @sge_smin1(i32 %x, i32 %y) {
; CHECK-LABEL: @sge_smin1(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp slt i32 %x, %y
@@ -80,7 +80,7 @@ define i1 @sge_smin1(i32 %x, i32 %y) {
define i1 @sge_smin2(i32 %x, i32 %y) {
; CHECK-LABEL: @sge_smin2(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp slt i32 %y, %x
@@ -175,11 +175,11 @@ define i1 @ne_smin4(i32 %a, i32 %y) {
ret i1 %cmp2
}
-; smin(X, Y) < X --> X > Y
+; smin(X, Y) < X --> Y < X
define i1 @slt_smin1(i32 %x, i32 %y) {
; CHECK-LABEL: @slt_smin1(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp slt i32 %x, %y
@@ -192,7 +192,7 @@ define i1 @slt_smin1(i32 %x, i32 %y) {
define i1 @slt_smin2(i32 %x, i32 %y) {
; CHECK-LABEL: @slt_smin2(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp slt i32 %y, %x
@@ -339,13 +339,11 @@ define void @eq_smin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
@@ -355,9 +353,9 @@ define void @eq_smin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP7]])
; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
-; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP9:%.*]] = icmp sle i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP10]])
; CHECK-NEXT: ret void
; CHECK: end:
@@ -398,13 +396,11 @@ define void @eq_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
@@ -414,9 +410,9 @@ define void @eq_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP7]])
; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
-; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP9:%.*]] = icmp sle i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP10]])
; CHECK-NEXT: ret void
; CHECK: end:
@@ -457,14 +453,10 @@ define void @slt_smin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP4]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
@@ -516,14 +508,10 @@ define void @slt_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP4]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
@@ -577,10 +565,8 @@ define void @sle_smin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
@@ -636,10 +622,8 @@ define void @sle_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
@@ -693,13 +677,13 @@ define void @sgt_smin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
@@ -752,13 +736,13 @@ define void @sgt_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
@@ -811,13 +795,13 @@ define void @sge_smin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
@@ -870,13 +854,13 @@ define void @sge_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
; CHECK: if:
; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP1]])
; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP2]])
; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
@@ -929,11 +913,11 @@ declare void @use_v2i1(<2 x i1> %c)
define void @eq_smin_v2i32(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @eq_smin_v2i32(
; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]])
-; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt <2 x i32> [[X]], [[Y]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[Y]], [[X]]
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]])
; CHECK-NEXT: call void @use_v2i1(<2 x i1> <i1 true, i1 true>)
; CHECK-NEXT: call void @use_v2i1(<2 x i1> zeroinitializer)
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sle <2 x i32> [[X]], [[Y]]
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge <2 x i32> [[Y]], [[X]]
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], [[X]]
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]])
@@ -981,7 +965,7 @@ define void @eq_smin_v2i32_constant(<2 x i32> %y) {
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]])
; CHECK-NEXT: call void @use_v2i1(<2 x i1> <i1 true, i1 true>)
; CHECK-NEXT: call void @use_v2i1(<2 x i1> zeroinitializer)
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[Y]], <i32 9, i32 9>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]])
@@ -1071,10 +1055,8 @@ define void @sle_smin_v2i32_constant(<2 x i32> %y) {
; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> <i32 5, i32 10>)
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], <i32 10, i32 10>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], <i32 11, i32 11>
-; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 10, i32 10>
-; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP3]])
+; CHECK-NEXT: call void @use_v2i1(<2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT: call void @use_v2i1(<2 x i1> zeroinitializer)
; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 9, i32 9>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
@@ -1119,13 +1101,13 @@ define void @sle_smin_v2i32_constant(<2 x i32> %y) {
define void @sgt_smin_v2i32_constant(<2 x i32> %y) {
; CHECK-LABEL: @sgt_smin_v2i32_constant(
; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> <i32 15, i32 15>)
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[Y]], <i32 10, i32 10>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]])
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt <2 x i32> [[Y]], <i32 11, i32 11>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt <2 x i32> [[Y]], <i32 10, i32 10>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[Y]], <i32 9, i32 9>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]])
@@ -1169,13 +1151,13 @@ define void @sgt_smin_v2i32_constant(<2 x i32> %y) {
define void @sge_smin_v2i32_constant(<2 x i32> %y) {
; CHECK-LABEL: @sge_smin_v2i32_constant(
; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> <i32 15, i32 10>)
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[Y]], <i32 10, i32 10>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]])
; CHECK-NEXT: [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], <i32 11, i32 11>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP2]])
; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 10, i32 10>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP3]])
-; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[Y]], <i32 9, i32 9>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]])
diff --git a/llvm/test/Transforms/InstCombine/umax-icmp.ll b/llvm/test/Transforms/InstCombine/umax-icmp.ll
index 16676874143ee9e..d6706374c49897c 100644
--- a/llvm/test/Transforms/InstCombine/umax-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/umax-icmp.ll
@@ -64,11 +64,11 @@ define i1 @eq_umax4(i32 %a, i32 %y) {
ret i1 %cmp2
}
-; umax(X, Y) <= X --> X >= Y
+; umax(X, Y) <= X --> Y <= X
define i1 @ule_umax1(i32 %x, i32 %y) {
; CHECK-LABEL: @ule_umax1(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp ugt i32 %x, %y
@@ -81,7 +81,7 @@ define i1 @ule_umax1(i32 %x, i32 %y) {
define i1 @ule_umax2(i32 %x, i32 %y) {
; CHECK-LABEL: @ule_umax2(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp ugt i32 %y, %x
@@ -176,11 +176,11 @@ define i1 @ne_umax4(i32 %a, i32 %y) {
ret i1 %cmp2
}
-; umax(X, Y) > X --> X < Y
+; umax(X, Y) > X --> Y > X
define i1 @ugt_umax1(i32 %x, i32 %y) {
; CHECK-LABEL: @ugt_umax1(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp ugt i32 %x, %y
@@ -193,7 +193,7 @@ define i1 @ugt_umax1(i32 %x, i32 %y) {
define i1 @ugt_umax2(i32 %x, i32 %y) {
; CHECK-LABEL: @ugt_umax2(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp ugt i32 %y, %x
@@ -248,17 +248,15 @@ define void @eq_umax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP8]])
-; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: [[CMP9:%.*]] = icmp uge i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP10]])
; CHECK-NEXT: ret void
; CHECK: end:
@@ -307,17 +305,15 @@ define void @eq_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP8]])
-; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: [[CMP9:%.*]] = icmp uge i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP10:%.*]] = icmp ult i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP10]])
; CHECK-NEXT: ret void
; CHECK: end:
@@ -366,13 +362,13 @@ define void @ult_umax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
@@ -425,13 +421,13 @@ define void @ult_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
@@ -486,9 +482,9 @@ define void @ule_umax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
@@ -545,9 +541,9 @@ define void @ule_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
@@ -602,14 +598,10 @@ define void @ugt_umax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP8]])
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
@@ -661,14 +653,10 @@ define void @ugt_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP8]])
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
@@ -720,14 +708,12 @@ define void @uge_umax_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP5]])
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP8]])
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
@@ -779,14 +765,12 @@ define void @uge_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP5]])
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP8]])
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
diff --git a/llvm/test/Transforms/InstCombine/umin-icmp.ll b/llvm/test/Transforms/InstCombine/umin-icmp.ll
index 8cd0a595b6f79a9..88a391bf18ca13e 100644
--- a/llvm/test/Transforms/InstCombine/umin-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/umin-icmp.ll
@@ -64,11 +64,11 @@ define i1 @eq_umin4(i32 %a, i32 %y) {
ret i1 %cmp2
}
-; umin(X, Y) >= X --> X <= Y
+; umin(X, Y) >= X --> Y >= X
define i1 @uge_umin1(i32 %x, i32 %y) {
; CHECK-LABEL: @uge_umin1(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp ult i32 %x, %y
@@ -81,7 +81,7 @@ define i1 @uge_umin1(i32 %x, i32 %y) {
define i1 @uge_umin2(i32 %x, i32 %y) {
; CHECK-LABEL: @uge_umin2(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp ult i32 %y, %x
@@ -176,11 +176,11 @@ define i1 @ne_umin4(i32 %a, i32 %y) {
ret i1 %cmp2
}
-; umin(X, Y) < X --> X > Y
+; umin(X, Y) < X --> Y < X
define i1 @ult_umin1(i32 %x, i32 %y) {
; CHECK-LABEL: @ult_umin1(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp ult i32 %x, %y
@@ -193,7 +193,7 @@ define i1 @ult_umin1(i32 %x, i32 %y) {
define i1 @ult_umin2(i32 %x, i32 %y) {
; CHECK-LABEL: @ult_umin2(
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp1 = icmp ult i32 %y, %x
@@ -248,17 +248,15 @@ define void @eq_umin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
-; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP9:%.*]] = icmp ule i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP10:%.*]] = icmp ugt i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP10]])
; CHECK-NEXT: ret void
; CHECK: end:
@@ -307,17 +305,15 @@ define void @eq_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
-; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP9:%.*]] = icmp ule i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
-; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP10:%.*]] = icmp ugt i32 [[X]], [[Y]]
; CHECK-NEXT: call void @use(i1 [[CMP10]])
; CHECK-NEXT: ret void
; CHECK: end:
@@ -366,14 +362,10 @@ define void @ult_umin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP8]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
@@ -425,14 +417,10 @@ define void @ult_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP8]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
@@ -486,10 +474,8 @@ define void @ule_umin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP7]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
@@ -545,10 +531,8 @@ define void @ule_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP4]])
; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
-; CHECK-NEXT: call void @use(i1 [[CMP7]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 false)
; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
@@ -602,13 +586,13 @@ define void @ugt_umin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
@@ -661,13 +645,13 @@ define void @ugt_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
-; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
-; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
@@ -720,13 +704,13 @@ define void @uge_umin_contextual(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
@@ -779,13 +763,13 @@ define void @uge_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: call void @use(i1 [[CMP3]])
; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP4]])
-; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP5]])
; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP6]])
; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP7]])
-; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[Y]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP8]])
; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
; CHECK-NEXT: call void @use(i1 [[CMP9]])
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
index 04142da386dcb81..3a5db926082f093 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
@@ -13,11 +13,10 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs
; CHECK-NEXT: [[DOT10:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP0:%.*]], i64 16
; CHECK-NEXT: [[DOT12:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP1:%.*]], i64 16
; CHECK-NEXT: [[UMAX2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP2:%.*]], i64 1)
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX2]], 16
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 16
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
-; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP2]], i64 1)
-; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[UMAX]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP3]], 16
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr addrspace(1) [[TMP0]], i64 [[TMP4]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr addrspace(1) [[TMP1]], i64 [[TMP4]]
@@ -32,23 +31,23 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[DOT12]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP5]], align 8, !alias.scope !0
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 4
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP7]], align 8, !alias.scope !0
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 8
-; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP9]], align 8, !alias.scope !0
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 12
-; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP11]], align 8, !alias.scope !0
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[DOT10]], i64 [[INDEX]]
-; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD]], ptr addrspace(1) [[TMP13]], align 8, !alias.scope !3, !noalias !0
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP13]], i64 4
-; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD3]], ptr addrspace(1) [[TMP15]], align 8, !alias.scope !3, !noalias !0
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP13]], i64 8
-; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD4]], ptr addrspace(1) [[TMP17]], align 8, !alias.scope !3, !noalias !0
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP13]], i64 12
-; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD5]], ptr addrspace(1) [[TMP19]], align 8, !alias.scope !3, !noalias !0
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 4
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP6]], align 8, !alias.scope !0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 8
+; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP7]], align 8, !alias.scope !0
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP5]], i64 12
+; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x ptr addrspace(1)>, ptr addrspace(1) [[TMP8]], align 8, !alias.scope !0
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[DOT10]], i64 [[INDEX]]
+; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD]], ptr addrspace(1) [[TMP9]], align 8, !alias.scope !3, !noalias !0
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i64 4
+; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD3]], ptr addrspace(1) [[TMP10]], align 8, !alias.scope !3, !noalias !0
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i64 8
+; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD4]], ptr addrspace(1) [[TMP11]], align 8, !alias.scope !3, !noalias !0
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[TMP9]], i64 12
+; CHECK-NEXT: store <4 x ptr addrspace(1)> [[WIDE_LOAD5]], ptr addrspace(1) [[TMP12]], align 8, !alias.scope !3, !noalias !0
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
@@ -63,7 +62,7 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs
; CHECK-NEXT: store ptr addrspace(1) [[V]], ptr addrspace(1) [[DOT20]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT4]] = add nuw nsw i64 [[INDVARS_IV3]], 1
; CHECK-NEXT: [[DOT21:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT4]], [[TMP2]]
-; CHECK-NEXT: br i1 [[DOT21]], label [[LOOP]], label [[LOOPEXIT]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[DOT21]], label [[LOOP]], label [[LOOPEXIT]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: loopexit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 4c1a9ffcce0d3ef..6e9a0e7ac0cda7d 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -481,11 +481,10 @@ for.body: ; preds = %for.body, %entry
define void @even_load_dynamic_tc(ptr noalias nocapture readonly %A, ptr noalias nocapture %B, i64 %N) {
; CHECK-LABEL: @even_load_dynamic_tc(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N:%.*]], i64 2)
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 9
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 9
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[UMAX]], -1
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP2:%.*]] = add nuw i64 [[TMP1]], 1
; CHECK-NEXT: [[N_MOD_VF:%.*]] = and i64 [[TMP2]], 3
More information about the llvm-commits
mailing list