[llvm] 4c42ab1 - [DAGCombiner] Change foldAndOrOfSETCC() to optimize and/or patterns

Konstantina Mitropoulou via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 17 17:14:20 PDT 2023


Author: Konstantina Mitropoulou
Date: 2023-07-17T17:13:47-07:00
New Revision: 4c42ab1199e9729c2f3a74f77ba765e4f9190d57

URL: https://github.com/llvm/llvm-project/commit/4c42ab1199e9729c2f3a74f77ba765e4f9190d57
DIFF: https://github.com/llvm/llvm-project/commit/4c42ab1199e9729c2f3a74f77ba765e4f9190d57.diff

LOG: [DAGCombiner] Change foldAndOrOfSETCC() to optimize and/or patterns

CMP(A,C)||CMP(B,C) => CMP(MIN/MAX(A,B), C)
CMP(A,C)&&CMP(B,C) => CMP(MIN/MAX(A,B), C)

This first patch handles integer types.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D153502
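
As a quick illustration of the scalar equivalences behind the fold, here is a
minimal standalone C++ sketch (illustrative only, not part of the patch; the
helper names are invented for this example). OR-ing two "less than"
comparisons against a common bound is equivalent to a single comparison of the
minimum of the operands, and AND-ing two "greater than" comparisons likewise
reduces to a comparison of the minimum; the mirrored cases use the maximum,
and unsigned predicates use the unsigned min/max, which is why the combine
chooses among SMIN, SMAX, UMIN and UMAX.

// Illustrative, self-contained check of the scalar equivalences
// (hypothetical helper names; not taken from the commit).
#include <algorithm>
#include <cassert>
#include <cstdint>

static bool orOfLess(int32_t A, int32_t B, int32_t C) {
  return (A < C) || (B < C);  // or-cmp-cmp form
}
static bool orOfLessFolded(int32_t A, int32_t B, int32_t C) {
  return std::min(A, B) < C;  // folded form: one min, one compare
}
static bool andOfGreater(int32_t A, int32_t B, int32_t C) {
  return (A > C) && (B > C);  // and-cmp-cmp form
}
static bool andOfGreaterFolded(int32_t A, int32_t B, int32_t C) {
  return std::min(A, B) > C;  // folded form: one min, one compare
}

int main() {
  const int32_t Vals[] = {-1001, -1, 0, 1, 999, 1000, 1001};
  for (int32_t A : Vals)
    for (int32_t B : Vals)
      for (int32_t C : Vals) {
        assert(orOfLess(A, B, C) == orOfLessFolded(A, B, C));
        assert(andOfGreater(A, B, C) == andOfGreaterFolded(A, B, C));
      }
  return 0;
}

The test updates below show the same effect at the instruction level: two
compares joined by an and/or collapse into one min/max followed by a single
compare.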

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AArch64/vecreduce-bool.ll
    llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
    llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
    llvm/test/CodeGen/PowerPC/setcc-logic.ll
    llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
    llvm/test/CodeGen/X86/movmsk-cmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d61f97c12fc026..579af998083f35 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6021,26 +6021,91 @@ static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {
   AndOrSETCCFoldKind TargetPreference = TLI.isDesirableToCombineLogicOpOfSETCC(
       LogicOp, LHS.getNode(), RHS.getNode());
 
-  if (TargetPreference == AndOrSETCCFoldKind::None)
-    return SDValue();
-
-  ISD::CondCode CCL = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
-  ISD::CondCode CCR = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
-
   SDValue LHS0 = LHS->getOperand(0);
   SDValue RHS0 = RHS->getOperand(0);
   SDValue LHS1 = LHS->getOperand(1);
   SDValue RHS1 = RHS->getOperand(1);
-
   // TODO: We don't actually need a splat here, for vectors we just need the
   // invariants to hold for each element.
   auto *LHS1C = isConstOrConstSplat(LHS1);
   auto *RHS1C = isConstOrConstSplat(RHS1);
-
+  ISD::CondCode CCL = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
+  ISD::CondCode CCR = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
   EVT VT = LogicOp->getValueType(0);
   EVT OpVT = LHS0.getValueType();
   SDLoc DL(LogicOp);
 
+  // Check whether the operands of an and/or operation are comparisons that
+  // compare against the same value. If so, replace the and/or-cmp-cmp
+  // sequence with a min/max-cmp sequence. For example, if LHS1 is equal to
+  // RHS1, the or-cmp-cmp sequence is replaced with a min-cmp sequence:
+  // (LHS0 < LHS1) | (RHS0 < RHS1) -> min(LHS0, RHS0) < LHS1
+  // and the and-cmp-cmp sequence is replaced with a max-cmp sequence:
+  // (LHS0 < LHS1) & (RHS0 < RHS1) -> max(LHS0, RHS0) < LHS1
+  if (OpVT.isInteger() && TLI.isOperationLegal(ISD::UMAX, OpVT) &&
+      TLI.isOperationLegal(ISD::SMAX, OpVT) &&
+      TLI.isOperationLegal(ISD::UMIN, OpVT) &&
+      TLI.isOperationLegal(ISD::SMIN, OpVT)) {
+    SDValue CommonValue;
+    SDValue Operand1;
+    SDValue Operand2;
+    ISD::CondCode CC = ISD::SETCC_INVALID;
+    if (LHS->getOpcode() == ISD::SETCC && RHS->getOpcode() == ISD::SETCC &&
+        LHS->hasOneUse() && RHS->hasOneUse() &&
+        // The two comparisons must either use the same predicate or
+        // predicates that are swapped forms of each other (operands reversed).
+        (CCL == CCR || CCL == ISD::getSetCCSwappedOperands(CCR)) &&
+        // The optimization does not work for `==` or `!=`.
+        !ISD::isIntEqualitySetCC(CCL) && !ISD::isIntEqualitySetCC(CCR)) {
+      if (CCL == CCR) {
+        if (LHS0 == RHS0) {
+          CommonValue = LHS0;
+          Operand1 = LHS1;
+          Operand2 = RHS1;
+          CC = ISD::getSetCCSwappedOperands(CCL);
+        } else if (LHS1 == RHS1) {
+          CommonValue = LHS1;
+          Operand1 = LHS0;
+          Operand2 = RHS0;
+          CC = CCL;
+        }
+      } else if (CCL == ISD::getSetCCSwappedOperands(CCR)) {
+        if (LHS0 == RHS1) {
+          CommonValue = LHS0;
+          Operand1 = LHS1;
+          Operand2 = RHS0;
+          CC = ISD::getSetCCSwappedOperands(CCL);
+        } else if (RHS0 == LHS1) {
+          CommonValue = LHS1;
+          Operand1 = LHS0;
+          Operand2 = RHS1;
+          CC = CCL;
+        }
+      }
+
+      if (CC != ISD::SETCC_INVALID) {
+        unsigned NewOpcode;
+        bool IsSigned = isSignedIntSetCC(CC);
+        if (((CC == ISD::SETLE || CC == ISD::SETULE || CC == ISD::SETLT ||
+              CC == ISD::SETULT) &&
+             (LogicOp->getOpcode() == ISD::OR)) ||
+            ((CC == ISD::SETGE || CC == ISD::SETUGE || CC == ISD::SETGT ||
+              CC == ISD::SETUGT) &&
+             (LogicOp->getOpcode() == ISD::AND)))
+          NewOpcode = IsSigned ? ISD::SMIN : ISD::UMIN;
+        else
+          NewOpcode = IsSigned ? ISD::SMAX : ISD::UMAX;
+
+        SDValue MinMaxValue =
+            DAG.getNode(NewOpcode, DL, OpVT, Operand1, Operand2);
+        return DAG.getSetCC(DL, VT, MinMaxValue, CommonValue, CC);
+      }
+    }
+  }
+
+  if (TargetPreference == AndOrSETCCFoldKind::None)
+    return SDValue();
+
   if (CCL == CCR &&
       CCL == (LogicOp->getOpcode() == ISD::AND ? ISD::SETNE : ISD::SETEQ) &&
       LHS0 == RHS0 && LHS1C && RHS1C && OpVT.isInteger() && LHS.hasOneUse() &&

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
index 58020d28702b2f..d75daa5f0c966e 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
@@ -96,7 +96,7 @@ define i32 @reduce_and_v16(<16 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 define i32 @reduce_and_v32(<32 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-LABEL: reduce_and_v32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    smax v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    uminv b0, v0.16b
 ; CHECK-NEXT:    fmov w8, s0
@@ -190,7 +190,7 @@ define i32 @reduce_or_v16(<16 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 define i32 @reduce_or_v32(<32 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-LABEL: reduce_or_v32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    smin v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    umaxv b0, v0.16b
 ; CHECK-NEXT:    fmov w8, s0

diff --git a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
index 27a3117db10f45..50e9494fc03df6 100644
--- a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
@@ -434,33 +434,31 @@ bb:
 define amdgpu_kernel void @add_and(ptr addrspace(1) nocapture %arg) {
 ; GCN-LABEL: add_and:
 ; GCN:       ; %bb.0: ; %bb
-; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, 0
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, 0
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
 ; GCN-NEXT:    v_mov_b32_e32 v3, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    buffer_load_dword v4, v[2:3], s[4:7], 0 addr64
-; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, v0, v1
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], 1, v0
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
+; GCN-NEXT:    buffer_load_dword v4, v[2:3], s[0:3], 0 addr64
+; GCN-NEXT:    v_max_u32_e32 v1, 1, v1
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, v1, v0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v4, vcc
-; GCN-NEXT:    buffer_store_dword v0, v[2:3], s[4:7], 0 addr64
+; GCN-NEXT:    buffer_store_dword v0, v[2:3], s[0:3], 0 addr64
 ; GCN-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: add_and:
 ; GFX9:       ; %bb.0: ; %bb
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; GFX9-NEXT:    v_cmp_gt_u32_e32 vcc, v0, v1
-; GFX9-NEXT:    v_cmp_lt_u32_e64 s[0:1], 1, v0
-; GFX9-NEXT:    s_and_b64 vcc, vcc, s[0:1]
+; GFX9-NEXT:    v_max_u32_e32 v1, 1, v1
+; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, v1, v0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dword v3, v2, s[2:3]
+; GFX9-NEXT:    global_load_dword v3, v2, s[0:1]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v3, vcc
-; GFX9-NEXT:    global_store_dword v2, v0, s[2:3]
+; GFX9-NEXT:    global_store_dword v2, v0, s[0:1]
 ; GFX9-NEXT:    s_endpgm
 bb:
   %x = tail call i32 @llvm.amdgcn.workitem.id.x()

diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index e17106176326c4..269e5d50c82e08 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -9,10 +9,9 @@ define i1 @test1(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0x3e8, v0
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e8, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp slt i32 %arg1, 1000
   %cmp2 = icmp slt i32 %arg2, 1000
@@ -24,10 +23,9 @@ define i1 @test2(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test2:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 0x3e8, v0
-; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 0x3e8, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ult i32 %arg1, 1000
   %cmp2 = icmp ult i32 %arg2, 1000
@@ -39,10 +37,9 @@ define i1 @test3(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test3:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0x3e9, v0
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e9, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sle i32 %arg1, 1000
   %cmp2 = icmp sle i32 %arg2, 1000
@@ -54,10 +51,9 @@ define i1 @test4(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 0x3e9, v0
-; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 0x3e9, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ule i32 %arg1, 1000
   %cmp2 = icmp ule i32 %arg2, 1000
@@ -69,10 +65,9 @@ define i1 @test5(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test5:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0x3e8, v0
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, 0x3e8, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sgt i32 %arg1, 1000
   %cmp2 = icmp sgt i32 %arg2, 1000
@@ -84,10 +79,9 @@ define i1 @test6(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test6:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 0x3e8, v0
-; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, 0x3e8, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ugt i32 %arg1, 1000
   %cmp2 = icmp ugt i32 %arg2, 1000
@@ -99,10 +93,9 @@ define i1 @test7(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test7:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0x3e7, v0
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, 0x3e7, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sge i32 %arg1, 1000
   %cmp2 = icmp sge i32 %arg2, 1000
@@ -114,10 +107,9 @@ define i1 @test8(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test8:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 0x3e7, v0
-; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, 0x3e7, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp uge i32 %arg1, 1000
   %cmp2 = icmp uge i32 %arg2, 1000
@@ -129,10 +121,9 @@ define i1 @test9(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test9:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp slt i32 %arg1, %arg3
   %cmp2 = icmp slt i32 %arg2, %arg3
@@ -144,10 +135,9 @@ define i1 @test10(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test10:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ult i32 %arg1, %arg3
   %cmp2 = icmp ult i32 %arg2, %arg3
@@ -159,10 +149,9 @@ define i1 @test11(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test11:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_le_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_le_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sle i32 %arg1, %arg3
   %cmp2 = icmp sle i32 %arg2, %arg3
@@ -174,10 +163,9 @@ define i1 @test12(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test12:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_le_u32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_le_u32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ule i32 %arg1, %arg3
   %cmp2 = icmp ule i32 %arg2, %arg3
@@ -189,10 +177,9 @@ define i1 @test13(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test13:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sgt i32 %arg1, %arg3
   %cmp2 = icmp sgt i32 %arg2, %arg3
@@ -204,10 +191,9 @@ define i1 @test14(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test14:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ugt i32 %arg1, %arg3
   %cmp2 = icmp ugt i32 %arg2, %arg3
@@ -219,10 +205,9 @@ define i1 @test15(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test15:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_ge_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_ge_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sge i32 %arg1, %arg3
   %cmp2 = icmp sge i32 %arg2, %arg3
@@ -234,10 +219,9 @@ define i1 @test16(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test16:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_ge_u32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp uge i32 %arg1, %arg3
   %cmp2 = icmp uge i32 %arg2, %arg3
@@ -249,10 +233,9 @@ define i1 @test17(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test17:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0x3e8, v0
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e8, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp slt i32 %arg1, 1000
   %cmp2 = icmp slt i32 %arg2, 1000
@@ -264,10 +247,9 @@ define i1 @test18(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test18:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 0x3e8, v0
-; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 0x3e8, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ult i32 %arg1, 1000
   %cmp2 = icmp ult i32 %arg2, 1000
@@ -279,10 +261,9 @@ define i1 @test19(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test19:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0x3e9, v0
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e9, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sle i32 %arg1, 1000
   %cmp2 = icmp sle i32 %arg2, 1000
@@ -294,10 +275,9 @@ define i1 @test20(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test20:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 0x3e9, v0
-; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 0x3e9, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ule i32 %arg1, 1000
   %cmp2 = icmp ule i32 %arg2, 1000
@@ -309,10 +289,9 @@ define i1 @test21(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test21:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0x3e8, v0
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, 0x3e8, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sgt i32 %arg1, 1000
   %cmp2 = icmp sgt i32 %arg2, 1000
@@ -324,10 +303,9 @@ define i1 @test22(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test22:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 0x3e8, v0
-; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, 0x3e8, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ugt i32 %arg1, 1000
   %cmp2 = icmp ugt i32 %arg2, 1000
@@ -339,10 +317,9 @@ define i1 @test23(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test23:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0x3e7, v0
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, 0x3e7, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sge i32 %arg1, 1000
   %cmp2 = icmp sge i32 %arg2, 1000
@@ -354,10 +331,9 @@ define i1 @test24(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test24:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 0x3e7, v0
-; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, 0x3e7, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp uge i32 %arg1, 1000
   %cmp2 = icmp uge i32 %arg2, 1000
@@ -369,10 +345,9 @@ define i1 @test25(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test25:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp slt i32 %arg1, %arg3
   %cmp2 = icmp slt i32 %arg2, %arg3
@@ -384,10 +359,9 @@ define i1 @test26(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test26:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ult i32 %arg1, %arg3
   %cmp2 = icmp ult i32 %arg2, %arg3
@@ -399,10 +373,9 @@ define i1 @test27(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test27:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_le_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_le_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sle i32 %arg1, %arg3
   %cmp2 = icmp sle i32 %arg2, %arg3
@@ -414,10 +387,9 @@ define i1 @test28(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test28:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_le_u32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_le_u32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ule i32 %arg1, %arg3
   %cmp2 = icmp ule i32 %arg2, %arg3
@@ -429,10 +401,9 @@ define i1 @test29(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test29:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sgt i32 %arg1, %arg3
   %cmp2 = icmp sgt i32 %arg2, %arg3
@@ -444,10 +415,9 @@ define i1 @test30(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test30:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ugt i32 %arg1, %arg3
   %cmp2 = icmp ugt i32 %arg2, %arg3
@@ -459,10 +429,9 @@ define i1 @test31(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test31:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_ge_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_ge_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sge i32 %arg1, %arg3
   %cmp2 = icmp sge i32 %arg2, %arg3
@@ -474,10 +443,9 @@ define i1 @test32(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_ge_u32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp uge i32 %arg1, %arg3
   %cmp2 = icmp uge i32 %arg2, %arg3
@@ -489,10 +457,9 @@ define i1 @test33(i32 %arg1, i32 %arg2) {
 ; CHECK-LABEL: test33:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v1
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e8, v0
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_max_i32_e32 v1, 0x3e8, v1
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp slt i32 %arg1, %arg2
   %cmp2 = icmp slt i32 %arg1, 1000
@@ -504,13 +471,11 @@ define amdgpu_gfx void @test34(i32 inreg %arg1, i32 inreg %arg2) {
 ; CHECK-LABEL: test34:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_cmpk_lt_i32 s4, 0x3e9
+; CHECK-NEXT:    s_min_i32 s0, s4, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
-; CHECK-NEXT:    s_cmpk_lt_i32 s5, 0x3e9
+; CHECK-NEXT:    s_cmpk_lt_i32 s0, 0x3e9
 ; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
-; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
 ; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
@@ -526,13 +491,11 @@ define amdgpu_gfx void @test35(i32 inreg %arg1, i32 inreg %arg2) {
 ; CHECK-LABEL: test35:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_cmpk_gt_i32 s4, 0x3e8
+; CHECK-NEXT:    s_max_i32 s0, s4, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
-; CHECK-NEXT:    s_cmpk_gt_i32 s5, 0x3e8
+; CHECK-NEXT:    s_cmpk_gt_i32 s0, 0x3e8
 ; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
-; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
 ; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
@@ -548,13 +511,11 @@ define amdgpu_gfx void @test36(i32 inreg %arg1, i32 inreg %arg2, i32 inreg %arg3
 ; CHECK-LABEL: test36:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_cmp_lt_u32 s4, s6
+; CHECK-NEXT:    s_min_u32 s0, s4, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
-; CHECK-NEXT:    s_cmp_lt_u32 s5, s6
+; CHECK-NEXT:    s_cmp_lt_u32 s0, s6
 ; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
-; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
 ; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
@@ -570,13 +531,11 @@ define amdgpu_gfx void @test37(i32 inreg %arg1, i32 inreg %arg2, i32 inreg %arg3
 ; CHECK-LABEL: test37:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_cmp_ge_i32 s4, s6
+; CHECK-NEXT:    s_max_i32 s0, s4, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
-; CHECK-NEXT:    s_cmp_ge_i32 s5, s6
+; CHECK-NEXT:    s_cmp_ge_i32 s0, s6
 ; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
-; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
 ; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
@@ -592,13 +551,11 @@ define amdgpu_gfx void @test38(i32 inreg %arg1, i32 inreg %arg2) {
 ; CHECK-LABEL: test38:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_cmpk_lt_u32 s4, 0x3e9
+; CHECK-NEXT:    s_max_u32 s0, s4, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
-; CHECK-NEXT:    s_cmpk_lt_u32 s5, 0x3e9
+; CHECK-NEXT:    s_cmpk_lt_u32 s0, 0x3e9
 ; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
-; CHECK-NEXT:    s_and_b32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
 ; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
@@ -614,13 +571,11 @@ define amdgpu_gfx void @test39(i32 inreg %arg1, i32 inreg %arg2) {
 ; CHECK-LABEL: test39:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_cmpk_gt_i32 s4, 0x3e7
+; CHECK-NEXT:    s_min_i32 s0, s4, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
-; CHECK-NEXT:    s_cmpk_gt_i32 s5, 0x3e7
+; CHECK-NEXT:    s_cmpk_gt_i32 s0, 0x3e7
 ; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
-; CHECK-NEXT:    s_and_b32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
 ; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
@@ -636,13 +591,11 @@ define amdgpu_gfx void @test40(i32 inreg %arg1, i32 inreg %arg2, i32 inreg %arg3
 ; CHECK-LABEL: test40:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_cmp_le_i32 s4, s6
+; CHECK-NEXT:    s_max_i32 s0, s4, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
-; CHECK-NEXT:    s_cmp_le_i32 s5, s6
+; CHECK-NEXT:    s_cmp_le_i32 s0, s6
 ; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
-; CHECK-NEXT:    s_and_b32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
 ; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
@@ -658,13 +611,11 @@ define amdgpu_gfx void @test41(i32 inreg %arg1, i32 inreg %arg2, i32 inreg %arg3
 ; CHECK-LABEL: test41:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_cmp_ge_u32 s4, s6
+; CHECK-NEXT:    s_min_u32 s0, s4, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
-; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
-; CHECK-NEXT:    s_cmp_ge_u32 s5, s6
+; CHECK-NEXT:    s_cmp_ge_u32 s0, s6
 ; CHECK-NEXT:    v_mov_b32_e32 v1, 0
-; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
-; CHECK-NEXT:    s_and_b32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
 ; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
 ; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
@@ -680,10 +631,9 @@ define i1 @test42(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test42:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v2, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ult i32 %arg3, %arg1
   %cmp2 = icmp ult i32 %arg3, %arg2
@@ -695,10 +645,9 @@ define i1 @test43(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test43:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v2, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ult i32 %arg3, %arg1
   %cmp2 = icmp ult i32 %arg3, %arg2
@@ -710,10 +659,9 @@ define i1 @test44(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test44:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, v2, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_max_u32_e32 v0, v0, v1
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ugt i32 %arg3, %arg1
   %cmp2 = icmp ugt i32 %arg3, %arg2
@@ -725,10 +673,9 @@ define i1 @test45(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test45:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, v2, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_min_u32_e32 v0, v0, v1
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp ugt i32 %arg3, %arg1
   %cmp2 = icmp ugt i32 %arg3, %arg2
@@ -740,10 +687,9 @@ define i1 @test46(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test46:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v2, v0
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp slt i32 %arg3, %arg1
   %cmp2 = icmp sgt i32 %arg2, %arg3
@@ -755,10 +701,9 @@ define i1 @test47(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test47:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v2, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sgt i32 %arg1, %arg3
   %cmp2 = icmp slt i32 %arg3, %arg2
@@ -770,10 +715,9 @@ define i1 @test48(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test48:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v2, v1
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp slt i32 %arg1, %arg3
   %cmp2 = icmp sgt i32 %arg3, %arg2
@@ -785,10 +729,9 @@ define i1 @test49(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test49:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v2, v0
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sgt i32 %arg3, %arg1
   %cmp2 = icmp slt i32 %arg2, %arg3
@@ -800,10 +743,9 @@ define i1 @test50(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test50:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v2, v0
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp slt i32 %arg3, %arg1
   %cmp2 = icmp sgt i32 %arg2, %arg3
@@ -815,10 +757,9 @@ define i1 @test51(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test51:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_min_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v2, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sgt i32 %arg1, %arg3
   %cmp2 = icmp slt i32 %arg3, %arg2
@@ -830,10 +771,9 @@ define i1 @test52(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test52:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
 ; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
-; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v2, v1
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp slt i32 %arg1, %arg3
   %cmp2 = icmp sgt i32 %arg3, %arg2
@@ -845,10 +785,9 @@ define i1 @test53(i32 %arg1, i32 %arg2, i32 %arg3) {
 ; CHECK-LABEL: test53:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v2, v0
-; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v1, v2
-; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_max_i32_e32 v0, v0, v1
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %cmp1 = icmp sgt i32 %arg3, %arg1
   %cmp2 = icmp slt i32 %arg2, %arg3

diff --git a/llvm/test/CodeGen/PowerPC/setcc-logic.ll b/llvm/test/CodeGen/PowerPC/setcc-logic.ll
index 7dca47128a5b48..e3ff0ff7550351 100644
--- a/llvm/test/CodeGen/PowerPC/setcc-logic.ll
+++ b/llvm/test/CodeGen/PowerPC/setcc-logic.ll
@@ -325,9 +325,9 @@ define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
 define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
 ; CHECK-LABEL: all_sign_bits_clear_vec:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xxleqv 36, 36, 36
-; CHECK-NEXT:    xxlor 34, 34, 35
-; CHECK-NEXT:    vcmpgtsw 2, 2, 4
+; CHECK-NEXT:    vminsw 2, 2, 3
+; CHECK-NEXT:    xxleqv 35, 35, 35
+; CHECK-NEXT:    vcmpgtsw 2, 2, 3
 ; CHECK-NEXT:    blr
   %a = icmp sgt <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
   %b = icmp sgt <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -351,9 +351,9 @@ define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
 define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
 ; CHECK-LABEL: all_sign_bits_set_vec:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xxlxor 36, 36, 36
-; CHECK-NEXT:    xxland 34, 34, 35
-; CHECK-NEXT:    vcmpgtsw 2, 4, 2
+; CHECK-NEXT:    vmaxsw 2, 2, 3
+; CHECK-NEXT:    xxlxor 35, 35, 35
+; CHECK-NEXT:    vcmpgtsw 2, 3, 2
 ; CHECK-NEXT:    blr
   %a = icmp slt <4 x i32> %P, zeroinitializer
   %b = icmp slt <4 x i32> %Q, zeroinitializer
@@ -378,9 +378,9 @@ define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
 define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
 ; CHECK-LABEL: any_sign_bits_set_vec:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xxlxor 36, 36, 36
-; CHECK-NEXT:    xxlor 34, 34, 35
-; CHECK-NEXT:    vcmpgtsw 2, 4, 2
+; CHECK-NEXT:    vminsw 2, 2, 3
+; CHECK-NEXT:    xxlxor 35, 35, 35
+; CHECK-NEXT:    vcmpgtsw 2, 3, 2
 ; CHECK-NEXT:    blr
   %a = icmp slt <4 x i32> %P, zeroinitializer
   %b = icmp slt <4 x i32> %Q, zeroinitializer
@@ -405,9 +405,9 @@ define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
 define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
 ; CHECK-LABEL: any_sign_bits_clear_vec:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xxleqv 36, 36, 36
-; CHECK-NEXT:    xxland 34, 34, 35
-; CHECK-NEXT:    vcmpgtsw 2, 2, 4
+; CHECK-NEXT:    vmaxsw 2, 2, 3
+; CHECK-NEXT:    xxleqv 35, 35, 35
+; CHECK-NEXT:    vcmpgtsw 2, 2, 3
 ; CHECK-NEXT:    blr
   %a = icmp sgt <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
   %b = icmp sgt <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>

diff --git a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
index b94c50dd7e8c8f..8910c7601b0e5a 100644
--- a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
+++ b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
@@ -223,7 +223,7 @@ define i1 @flo(float %c, float %a, float %b) {
 ; CHECK-RV64I-NEXT:    mv a0, s0
 ; CHECK-RV64I-NEXT:    mv a1, s1
 ; CHECK-RV64I-NEXT:    call __gesf2@plt
-; CHECK-RV64I-NEXT:    or a0, s2, a0
+; CHECK-RV64I-NEXT:    min a0, s2, a0
 ; CHECK-RV64I-NEXT:    slti a0, a0, 0
 ; CHECK-RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK-RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
@@ -269,7 +269,7 @@ define i1 @dlo(double %c, double %a, double %b) {
 ; CHECK-NEXT:    mv a0, s0
 ; CHECK-NEXT:    mv a1, s1
 ; CHECK-NEXT:    call __gedf2@plt
-; CHECK-NEXT:    or a0, s2, a0
+; CHECK-NEXT:    min a0, s2, a0
 ; CHECK-NEXT:    slti a0, a0, 0
 ; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload

diff --git a/llvm/test/CodeGen/X86/movmsk-cmp.ll b/llvm/test/CodeGen/X86/movmsk-cmp.ll
index b6eebe95cdd331..6ed44771f703e5 100644
--- a/llvm/test/CodeGen/X86/movmsk-cmp.ll
+++ b/llvm/test/CodeGen/X86/movmsk-cmp.ll
@@ -422,15 +422,25 @@ define i1 @allzeros_v16i16_sign(<16 x i16> %arg) {
 }
 
 define i1 @allones_v32i16_sign(<32 x i16> %arg) {
-; SSE-LABEL: allones_v32i16_sign:
-; SSE:       # %bb.0:
-; SSE-NEXT:    pand %xmm3, %xmm1
-; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    packsswb %xmm1, %xmm0
-; SSE-NEXT:    pmovmskb %xmm0, %eax
-; SSE-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; SSE-NEXT:    sete %al
-; SSE-NEXT:    retq
+; SSE2-LABEL: allones_v32i16_sign:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    packsswb %xmm1, %xmm0
+; SSE2-NEXT:    pmovmskb %xmm0, %eax
+; SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT:    sete %al
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: allones_v32i16_sign:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pmaxsw %xmm3, %xmm1
+; SSE41-NEXT:    pmaxsw %xmm2, %xmm0
+; SSE41-NEXT:    packsswb %xmm1, %xmm0
+; SSE41-NEXT:    pmovmskb %xmm0, %eax
+; SSE41-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; SSE41-NEXT:    sete %al
+; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: allones_v32i16_sign:
 ; AVX1:       # %bb.0:
@@ -486,15 +496,25 @@ define i1 @allones_v32i16_sign(<32 x i16> %arg) {
 }
 
 define i1 @allzeros_v32i16_sign(<32 x i16> %arg) {
-; SSE-LABEL: allzeros_v32i16_sign:
-; SSE:       # %bb.0:
-; SSE-NEXT:    por %xmm3, %xmm1
-; SSE-NEXT:    por %xmm2, %xmm0
-; SSE-NEXT:    packsswb %xmm1, %xmm0
-; SSE-NEXT:    pmovmskb %xmm0, %eax
-; SSE-NEXT:    testl %eax, %eax
-; SSE-NEXT:    sete %al
-; SSE-NEXT:    retq
+; SSE2-LABEL: allzeros_v32i16_sign:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    por %xmm3, %xmm1
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    packsswb %xmm1, %xmm0
+; SSE2-NEXT:    pmovmskb %xmm0, %eax
+; SSE2-NEXT:    testl %eax, %eax
+; SSE2-NEXT:    sete %al
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: allzeros_v32i16_sign:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    pminsw %xmm3, %xmm1
+; SSE41-NEXT:    pminsw %xmm2, %xmm0
+; SSE41-NEXT:    packsswb %xmm1, %xmm0
+; SSE41-NEXT:    pmovmskb %xmm0, %eax
+; SSE41-NEXT:    testl %eax, %eax
+; SSE41-NEXT:    sete %al
+; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: allzeros_v32i16_sign:
 ; AVX1:       # %bb.0:

