[llvm] 32ad455 - [InstCombine] Add test cases from PR62898. NFC.

Yingwei Zheng via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 10 11:27:47 PDT 2023


Author: Yingwei Zheng
Date: 2023-09-11T02:26:47+08:00
New Revision: 32ad45556e9c673d2b6daba4a3b7993ad4e8e9ac

URL: https://github.com/llvm/llvm-project/commit/32ad45556e9c673d2b6daba4a3b7993ad4e8e9ac
DIFF: https://github.com/llvm/llvm-project/commit/32ad45556e9c673d2b6daba4a3b7993ad4e8e9ac.diff

LOG: [InstCombine] Add test cases from PR62898. NFC.

This patch adds some test cases from https://github.com/llvm/llvm-project/issues/62898.

As nikic noted in the issue, we should start by implementing a generalization of the fold `smin(X, Y) < Z -> X < Z` when `Y > Z` is implied by constant folds/invariants/dom conditions.
```
define i1 @src(i32 %x, i32 %y, i32 %z) {
  %cmp = icmp sgt i32 %y, %z
  br i1 %cmp, label %if, label %end
if:
  %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y)
  %tobool = icmp slt i32 %cond, %z
  ret i1 %tobool
end:
  ret i1 false
}

define i1 @tgt(i32 %x, i32 %y, i32 %z) {
  %cmp = icmp sgt i32 %y, %z
  br i1 %cmp, label %if, label %end
if:
  %tobool = icmp slt i32 %x, %z
  ret i1 %tobool
end:
  ret i1 false
}

declare i32 @llvm.smin.i32(i32, i32)
```
Alive2: https://alive2.llvm.org/ce/z/dK9vXz

This patch also adds some generalized test cases like the above.

Reviewed By: goldstein.w.n

Differential Revision: https://reviews.llvm.org/D156227

Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/smax-icmp.ll
    llvm/test/Transforms/InstCombine/smin-icmp.ll
    llvm/test/Transforms/InstCombine/umax-icmp.ll
    llvm/test/Transforms/InstCombine/umin-icmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/smax-icmp.ll b/llvm/test/Transforms/InstCombine/smax-icmp.ll
index 830ae9bd7337f3f..9cf64a9d803d664 100644
--- a/llvm/test/Transforms/InstCombine/smax-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/smax-icmp.ll
@@ -232,3 +232,596 @@ define i1 @sgt_smax4(i32 %a, i32 %y) {
   ret i1 %cmp2
 }
 
+declare void @use(i1 %c)
+
+define void @eq_smax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @eq_smax_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp eq i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @eq_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @eq_smax_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp eq i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @slt_smax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @slt_smax_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp slt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @slt_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @slt_smax_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp slt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sle_smax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sle_smax_contextual(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sle i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sle_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sle_smax_contextual_commuted(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sle i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sgt_smax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sgt_smax_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sgt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sgt_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sgt_smax_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sgt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sge_smax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sge_smax_contextual(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sge i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sge_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sge_smax_contextual_commuted(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sge i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+declare i32 @llvm.smax.i32(i32, i32)

diff --git a/llvm/test/Transforms/InstCombine/smin-icmp.ll b/llvm/test/Transforms/InstCombine/smin-icmp.ll
index 7089b5469d26294..69ff8414df2a430 100644
--- a/llvm/test/Transforms/InstCombine/smin-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/smin-icmp.ll
@@ -331,3 +331,967 @@ define i1 @sgt_smin4(i32 %a, i32 %y) {
   ret i1 %cmp2
 }
 
+declare void @use(i1 %c)
+
+define void @eq_smin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @eq_smin_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp eq i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @eq_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @eq_smin_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp eq i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @slt_smin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @slt_smin_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp slt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @slt_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @slt_smin_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp slt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sle_smin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sle_smin_contextual(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sle i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sle_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sle_smin_contextual_commuted(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sle i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sgt_smin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sgt_smin_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sgt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sgt_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sgt_smin_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sgt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sge_smin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sge_smin_contextual(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sge i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @sge_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @sge_smin_contextual_commuted(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp sge i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+declare void @use_v2i1(<2 x i1> %c)
+
+; icmp pred smin(X, Y), X
+define void @eq_smin_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @eq_smin_v2i32(
+; CHECK-NEXT:    [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt <2 x i32> [[X]], [[Y]]
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP1]])
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> zeroinitializer)
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sle <2 x i32> [[X]], [[Y]]
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], [[X]]
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule <2 x i32> [[COND]], [[X]]
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], [[X]]
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge <2 x i32> [[COND]], [[X]]
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp sle <2 x i32> [[X]], [[Y]]
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp sgt <2 x i32> [[X]], [[Y]]
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP10]])
+; CHECK-NEXT:    ret void
+;
+  %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %x, <2 x i32> %y)
+  %cmp1 = icmp slt <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp1)
+  %cmp2 = icmp sle <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp2)
+  %cmp3 = icmp sgt <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp3)
+  %cmp4 = icmp sge <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp4)
+  %cmp5 = icmp ult <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp5)
+  %cmp6 = icmp ule <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp6)
+  %cmp7 = icmp ugt <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp7)
+  %cmp8 = icmp uge <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp8)
+  %cmp9 = icmp eq <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp9)
+  %cmp10 = icmp ne <2 x i32> %cond, %x
+  call void @use_v2i1(<2 x i1> %cmp10)
+  ret void
+}
+
+; icmp pred smin(C1, Y), C2 where C1 == C2
+define void @eq_smin_v2i32_constant(<2 x i32> %y) {
+; CHECK-LABEL: @eq_smin_v2i32_constant(
+; CHECK-NEXT:    [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> <i32 10, i32 10>)
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt <2 x i32> [[Y]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP1]])
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> zeroinitializer)
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp sgt <2 x i32> [[Y]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp slt <2 x i32> [[Y]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP10]])
+; CHECK-NEXT:    ret void
+;
+  %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> <i32 10, i32 10>, <2 x i32> %y)
+  %cmp1 = icmp slt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp1)
+  %cmp2 = icmp sle <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp2)
+  %cmp3 = icmp sgt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp3)
+  %cmp4 = icmp sge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp4)
+  %cmp5 = icmp ult <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp5)
+  %cmp6 = icmp ule <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp6)
+  %cmp7 = icmp ugt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp7)
+  %cmp8 = icmp uge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp8)
+  %cmp9 = icmp eq <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp9)
+  %cmp10 = icmp ne <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp10)
+  ret void
+}
+
+; icmp pred smin(C1, Y), C2 where C1 < C2
+define void @slt_smin_v2i32_constant(<2 x i32> %y) {
+; CHECK-LABEL: @slt_smin_v2i32_constant(
+; CHECK-NEXT:    [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> <i32 5, i32 5>)
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> zeroinitializer)
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> zeroinitializer)
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP8]])
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> zeroinitializer)
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> <i1 true, i1 true>)
+; CHECK-NEXT:    ret void
+;
+  %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> <i32 5, i32 5>, <2 x i32> %y)
+  %cmp1 = icmp slt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp1)
+  %cmp2 = icmp sle <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp2)
+  %cmp3 = icmp sgt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp3)
+  %cmp4 = icmp sge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp4)
+  %cmp5 = icmp ult <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp5)
+  %cmp6 = icmp ule <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp6)
+  %cmp7 = icmp ugt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp7)
+  %cmp8 = icmp uge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp8)
+  %cmp9 = icmp eq <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp9)
+  %cmp10 = icmp ne <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp10)
+  ret void
+}
+
+; icmp pred smin(C1, Y), C2 where C1 <= C2
+define void @sle_smin_v2i32_constant(<2 x i32> %y) {
+; CHECK-LABEL: @sle_smin_v2i32_constant(
+; CHECK-NEXT:    [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> <i32 5, i32 10>)
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP10]])
+; CHECK-NEXT:    ret void
+;
+  %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> <i32 5, i32 10>, <2 x i32> %y)
+  %cmp1 = icmp slt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp1)
+  %cmp2 = icmp sle <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp2)
+  %cmp3 = icmp sgt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp3)
+  %cmp4 = icmp sge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp4)
+  %cmp5 = icmp ult <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp5)
+  %cmp6 = icmp ule <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp6)
+  %cmp7 = icmp ugt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp7)
+  %cmp8 = icmp uge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp8)
+  %cmp9 = icmp eq <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp9)
+  %cmp10 = icmp ne <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp10)
+  ret void
+}
+
+; icmp pred smin(C1, Y), C2 where C1 > C2
+define void @sgt_smin_v2i32_constant(<2 x i32> %y) {
+; CHECK-LABEL: @sgt_smin_v2i32_constant(
+; CHECK-NEXT:    [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> <i32 15, i32 15>)
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP10]])
+; CHECK-NEXT:    ret void
+;
+  %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> <i32 15, i32 15>, <2 x i32> %y)
+  %cmp1 = icmp slt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp1)
+  %cmp2 = icmp sle <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp2)
+  %cmp3 = icmp sgt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp3)
+  %cmp4 = icmp sge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp4)
+  %cmp5 = icmp ult <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp5)
+  %cmp6 = icmp ule <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp6)
+  %cmp7 = icmp ugt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp7)
+  %cmp8 = icmp uge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp8)
+  %cmp9 = icmp eq <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp9)
+  %cmp10 = icmp ne <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp10)
+  ret void
+}
+
+; icmp pred smin(C1, Y), C2 where C1 >= C2
+define void @sge_smin_v2i32_constant(<2 x i32> %y) {
+; CHECK-LABEL: @sge_smin_v2i32_constant(
+; CHECK-NEXT:    [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> <i32 15, i32 10>)
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP10]])
+; CHECK-NEXT:    ret void
+;
+  %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> <i32 15, i32 10>, <2 x i32> %y)
+  %cmp1 = icmp slt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp1)
+  %cmp2 = icmp sle <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp2)
+  %cmp3 = icmp sgt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp3)
+  %cmp4 = icmp sge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp4)
+  %cmp5 = icmp ult <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp5)
+  %cmp6 = icmp ule <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp6)
+  %cmp7 = icmp ugt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp7)
+  %cmp8 = icmp uge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp8)
+  %cmp9 = icmp eq <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp9)
+  %cmp10 = icmp ne <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp10)
+  ret void
+}
+
+; icmp pred smin(C1, Y), C2 where (icmp pred' C1, C2) is not a constant splat for all pred'
+define void @unknown_smin_v2i32_constant(<2 x i32> %y) {
+; CHECK-LABEL: @unknown_smin_v2i32_constant(
+; CHECK-NEXT:    [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> <i32 5, i32 15>)
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], <i32 11, i32 11>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], <i32 9, i32 9>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne <2 x i32> [[COND]], <i32 10, i32 10>
+; CHECK-NEXT:    call void @use_v2i1(<2 x i1> [[CMP10]])
+; CHECK-NEXT:    ret void
+;
+  %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> <i32 5, i32 15>, <2 x i32> %y)
+  %cmp1 = icmp slt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp1)
+  %cmp2 = icmp sle <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp2)
+  %cmp3 = icmp sgt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp3)
+  %cmp4 = icmp sge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp4)
+  %cmp5 = icmp ult <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp5)
+  %cmp6 = icmp ule <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp6)
+  %cmp7 = icmp ugt <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp7)
+  %cmp8 = icmp uge <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp8)
+  %cmp9 = icmp eq <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp9)
+  %cmp10 = icmp ne <2 x i32> %cond, <i32 10, i32 10>
+  call void @use_v2i1(<2 x i1> %cmp10)
+  ret void
+}
+
+; Test cases from PR62898
+
+define i1 @smin_or_bitwise(i32 %x) {
+; CHECK-LABEL: @smin_or_bitwise(
+; CHECK-NEXT:    [[COND:%.*]] = tail call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 1)
+; CHECK-NEXT:    [[LOBIT:%.*]] = or i32 [[COND]], [[X]]
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp slt i32 [[LOBIT]], 0
+; CHECK-NEXT:    ret i1 [[TOBOOL]]
+;
+  %cond = tail call i32 @llvm.smin.i32(i32 %x, i32 1)
+  %lobit = or i32 %cond, %x
+  %tobool = icmp slt i32 %lobit, 0
+  ret i1 %tobool
+}
+
+define i1 @smin_and_bitwise(i32 %x) {
+; CHECK-LABEL: @smin_and_bitwise(
+; CHECK-NEXT:    [[COND:%.*]] = tail call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 1)
+; CHECK-NEXT:    [[LOBIT:%.*]] = and i32 [[COND]], [[X]]
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp slt i32 [[LOBIT]], 0
+; CHECK-NEXT:    ret i1 [[TOBOOL]]
+;
+  %cond = tail call i32 @llvm.smin.i32(i32 %x, i32 1)
+  %lobit = and i32 %cond, %x
+  %tobool = icmp slt i32 %lobit, 0
+  ret i1 %tobool
+}
+
+declare i32 @llvm.smin.i32(i32, i32)
+declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)

diff  --git a/llvm/test/Transforms/InstCombine/umax-icmp.ll b/llvm/test/Transforms/InstCombine/umax-icmp.ll
index 8dccc05ac0c7d7a..16676874143ee9e 100644
--- a/llvm/test/Transforms/InstCombine/umax-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/umax-icmp.ll
@@ -232,3 +232,596 @@ define i1 @ugt_umax4(i32 %a, i32 %y) {
   ret i1 %cmp2
 }
 
+declare void @use(i1 %c)
+
+define void @eq_umax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @eq_umax_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp eq i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @eq_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @eq_umax_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp eq i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ult_umax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ult_umax_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ult i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ult_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ult_umax_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ult i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ule_umax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ule_umax_contextual(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ule i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ule_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ule_umax_contextual_commuted(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ule i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ugt_umax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ugt_umax_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ugt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ugt_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ugt_umax_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ugt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @uge_umax_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @uge_umax_contextual(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp uge i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @uge_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @uge_umax_contextual_commuted(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp uge i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+declare i32 @llvm.umax.i32(i32, i32)

diff  --git a/llvm/test/Transforms/InstCombine/umin-icmp.ll b/llvm/test/Transforms/InstCombine/umin-icmp.ll
index ce3f72969f9d2af..8cd0a595b6f79a9 100644
--- a/llvm/test/Transforms/InstCombine/umin-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/umin-icmp.ll
@@ -232,3 +232,596 @@ define i1 @ult_umin4(i32 %a, i32 %y) {
   ret i1 %cmp2
 }
 
+declare void @use(i1 %c)
+
+define void @eq_umin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @eq_umin_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp eq i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @eq_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @eq_umin_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp eq i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ult_umin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ult_umin_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ult i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ult_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ult_umin_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ult i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ule_umin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ule_umin_contextual(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ule i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ule_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ule_umin_contextual_commuted(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ule i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ugt_umin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ugt_umin_contextual(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ugt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @ugt_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @ugt_umin_contextual_commuted(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ugt i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @uge_umin_contextual(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @uge_umin_contextual(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp uge i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+define void @uge_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @uge_umin_contextual_commuted(
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]]
+; CHECK:       if:
+; CHECK-NEXT:    [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]])
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP1]])
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP2]])
+; CHECK-NEXT:    [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP3]])
+; CHECK-NEXT:    [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP4]])
+; CHECK-NEXT:    [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP5]])
+; CHECK-NEXT:    [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP6]])
+; CHECK-NEXT:    [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP7]])
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP8]])
+; CHECK-NEXT:    [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP9]])
+; CHECK-NEXT:    [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]]
+; CHECK-NEXT:    call void @use(i1 [[CMP10]])
+; CHECK-NEXT:    ret void
+; CHECK:       end:
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp uge i32 %x, %z
+  br i1 %cmp, label %if, label %end
+if:
+  %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x)
+  %cmp1 = icmp slt i32 %cond, %z
+  call void @use(i1 %cmp1)
+  %cmp2 = icmp sle i32 %cond, %z
+  call void @use(i1 %cmp2)
+  %cmp3 = icmp sgt i32 %cond, %z
+  call void @use(i1 %cmp3)
+  %cmp4 = icmp sge i32 %cond, %z
+  call void @use(i1 %cmp4)
+  %cmp5 = icmp ult i32 %cond, %z
+  call void @use(i1 %cmp5)
+  %cmp6 = icmp ule i32 %cond, %z
+  call void @use(i1 %cmp6)
+  %cmp7 = icmp ugt i32 %cond, %z
+  call void @use(i1 %cmp7)
+  %cmp8 = icmp uge i32 %cond, %z
+  call void @use(i1 %cmp8)
+  %cmp9 = icmp eq i32 %cond, %z
+  call void @use(i1 %cmp9)
+  %cmp10 = icmp ne i32 %cond, %z
+  call void @use(i1 %cmp10)
+  ret void
+end:
+  ret void
+}
+
+declare i32 @llvm.umin.i32(i32, i32)


        


More information about the llvm-commits mailing list