[llvm] [DAGCombiner] Freeze maybe poison operands when folding select to logic (PR #84924)

Björn Pettersson via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 15 07:00:41 PDT 2024


https://github.com/bjope updated https://github.com/llvm/llvm-project/pull/84924

>From 5009e9a474be11804e2f205bfc72315bff5f9072 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Fri, 15 Mar 2024 14:20:26 +0100
Subject: [PATCH 1/2] Add test cases for SELECT->AND miscompiles in DAGCombiner

Add reproducers for GitHub issues #84653 and #85190.
---
 llvm/test/CodeGen/RISCV/pr84653_pr85190.ll | 95 ++++++++++++++++++++++
 1 file changed, 95 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/pr84653_pr85190.ll

diff --git a/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll b/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll
new file mode 100644
index 00000000000000..3fa494e1a57ddc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=riscv64 | FileCheck %s --check-prefixes=CHECK-NOZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s --check-prefixes=CHECK-ZBB
+
+; This test case was miscompiled for ZBB (DAGCombiner turned a SELECT into a
+; more poisonous AND operation).
+define i1 @pr84653(i32 %x) {
+; CHECK-NOZBB-LABEL: pr84653:
+; CHECK-NOZBB:       # %bb.0:
+; CHECK-NOZBB-NEXT:    sext.w a1, a0
+; CHECK-NOZBB-NEXT:    sgtz a2, a1
+; CHECK-NOZBB-NEXT:    lui a3, 524288
+; CHECK-NOZBB-NEXT:    addi a3, a3, -1
+; CHECK-NOZBB-NEXT:    xor a0, a0, a3
+; CHECK-NOZBB-NEXT:    sext.w a0, a0
+; CHECK-NOZBB-NEXT:    slt a0, a0, a1
+; CHECK-NOZBB-NEXT:    and a0, a2, a0
+; CHECK-NOZBB-NEXT:    ret
+;
+; CHECK-ZBB-LABEL: pr84653:
+; CHECK-ZBB:       # %bb.0:
+; CHECK-ZBB-NEXT:    sext.w a1, a0
+; CHECK-ZBB-NEXT:    lui a2, 524288
+; CHECK-ZBB-NEXT:    addi a2, a2, -1
+; CHECK-ZBB-NEXT:    xor a0, a0, a2
+; CHECK-ZBB-NEXT:    sext.w a0, a0
+; CHECK-ZBB-NEXT:    max a0, a0, zero
+; CHECK-ZBB-NEXT:    slt a0, a0, a1
+; CHECK-ZBB-NEXT:    ret
+  %cmp1 = icmp sgt i32 %x, 0
+  %sub = sub nsw i32 2147483647, %x  ; 0x7fffffff
+  %cmp2 = icmp sgt i32 %x, %sub
+  %r = select i1 %cmp1, i1 %cmp2, i1 false
+  ret i1 %r
+}
+
+; This test case was miscompiled for ZBB (DAGCombiner turned a SELECT into a
+; more poisonous AND operation).
+define i1 @pr85190(i64 %a) {
+; CHECK-NOZBB-LABEL: pr85190:
+; CHECK-NOZBB:       # %bb.0:
+; CHECK-NOZBB-NEXT:    ori a1, a0, 7
+; CHECK-NOZBB-NEXT:    slti a2, a0, 0
+; CHECK-NOZBB-NEXT:    li a3, -1
+; CHECK-NOZBB-NEXT:    slli a3, a3, 63
+; CHECK-NOZBB-NEXT:    sub a3, a3, a1
+; CHECK-NOZBB-NEXT:    slt a0, a0, a3
+; CHECK-NOZBB-NEXT:    and a0, a2, a0
+; CHECK-NOZBB-NEXT:    ret
+;
+; CHECK-ZBB-LABEL: pr85190:
+; CHECK-ZBB:       # %bb.0:
+; CHECK-ZBB-NEXT:    ori a1, a0, 7
+; CHECK-ZBB-NEXT:    li a2, -1
+; CHECK-ZBB-NEXT:    slli a2, a2, 63
+; CHECK-ZBB-NEXT:    sub a2, a2, a1
+; CHECK-ZBB-NEXT:    slt a0, a0, a2
+; CHECK-ZBB-NEXT:    ret
+  %or = or i64 %a, 7
+  %cmp1 = icmp slt i64 %a, 0
+  %sub = sub nsw i64 -9223372036854775808, %or  ; 0x8000000000000000
+  %cmp2 = icmp sgt i64 %sub, %a
+  %res = select i1 %cmp1, i1 %cmp2, i1 false
+  ret i1 %res
+}
+
+define i1 @select_to_or(i32 %x) {
+; CHECK-NOZBB-LABEL: select_to_or:
+; CHECK-NOZBB:       # %bb.0:
+; CHECK-NOZBB-NEXT:    sext.w a1, a0
+; CHECK-NOZBB-NEXT:    sgtz a2, a1
+; CHECK-NOZBB-NEXT:    lui a3, 524288
+; CHECK-NOZBB-NEXT:    addi a3, a3, -1
+; CHECK-NOZBB-NEXT:    xor a0, a0, a3
+; CHECK-NOZBB-NEXT:    sext.w a0, a0
+; CHECK-NOZBB-NEXT:    slt a0, a0, a1
+; CHECK-NOZBB-NEXT:    or a0, a2, a0
+; CHECK-NOZBB-NEXT:    ret
+;
+; CHECK-ZBB-LABEL: select_to_or:
+; CHECK-ZBB:       # %bb.0:
+; CHECK-ZBB-NEXT:    sext.w a1, a0
+; CHECK-ZBB-NEXT:    lui a2, 524288
+; CHECK-ZBB-NEXT:    addi a2, a2, -1
+; CHECK-ZBB-NEXT:    xor a0, a0, a2
+; CHECK-ZBB-NEXT:    sext.w a0, a0
+; CHECK-ZBB-NEXT:    min a0, a0, zero
+; CHECK-ZBB-NEXT:    slt a0, a0, a1
+; CHECK-ZBB-NEXT:    ret
+  %cmp1 = icmp sgt i32 %x, 0
+  %sub = sub nsw i32 2147483647, %x  ; 0x7fffffff
+  %cmp2 = icmp sgt i32 %x, %sub
+  %r = select i1 %cmp1, i1 true, i1 %cmp2
+  ret i1 %r
+}

>From 1d369bfc252b4ae1d69f7fca0516ce89344ae019 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Tue, 12 Mar 2024 15:24:33 +0100
Subject: [PATCH 2/2] [DAGCombiner] Freeze maybe poison operands when folding
 select to logic

Just like in regular IR, we need to treat SELECT as conditionally
blocking poison: unless the condition itself is poison, the result
is only poison if the selected true/false value is poison. Thus,
when DAG combines turn a SELECT into arithmetic/logical operations
(e.g. AND/OR) we need to make sure that the new operations aren't
more poisonous. One way to do that is to use FREEZE to make sure
the operands aren't poison.
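
To illustrate with a minimal IR sketch (hypothetical values, not
taken from the reproducers below): if %t may be poison, then

  %r = select i1 %c, i1 %t, i1 false  ; well-defined when %c is
                                      ; false, even if %t is poison
  %r = and i1 %c, %t                  ; poison whenever %t is
                                      ; poison, regardless of %c

so a sound replacement is

  %t.fr = freeze i1 %t                ; blocks the poison
  %r = and i1 %c, %t.fr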

This patch aims at fixing the kind of miscompiles reported in
  https://github.com/llvm/llvm-project/issues/84653
and
  https://github.com/llvm/llvm-project/issues/85190

The solution is to insert FREEZE, when needed to make the fold
sound, in the foldBoolSelectToLogic and
foldVSelectToSignBitSplatMask DAG combines.

This may result in some (hopefully minor) regressions, since we
lack some folds to eliminate the freeze (or because
isGuaranteedNotToBePoison is too pessimistic). The focus of this
patch is to avoid the miscompiles; some of the regressions can
likely be recovered by general improvements to poison/freeze
handling in SelectionDAG.
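
As a hypothetical sketch of such a regression (not one of the
tests below): for

  %t = icmp sgt i32 %z, %w
  %r = select i1 %c, i1 %t, i1 false

the icmp only produces poison if %z or %w is poison, so a less
pessimistic isGuaranteedNotToBePoison (or a fold that pushes the
freeze through the icmp onto its operands) could often avoid the
inserted freeze. Until then, the freeze can block later pattern
matching, e.g. the AArch64 ccmp formation lost in the
cmp-chains.ll diffs below.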
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  30 ++-
 llvm/test/CodeGen/AArch64/cmp-chains.ll       | 188 +++++++-----------
 ...rleaving-reductions-predicated-scalable.ll |   3 +-
 llvm/test/CodeGen/AArch64/fast-isel-select.ll |  20 +-
 .../AArch64/intrinsic-cttz-elts-sve.ll        |   3 +-
 .../CodeGen/AArch64/select-with-and-or.ll     |  26 ++-
 .../CodeGen/AArch64/sve-fp-int-min-max.ll     |   3 +-
 llvm/test/CodeGen/AMDGPU/div_i128.ll          |  64 +++---
 .../AMDGPU/divergence-driven-trunc-to-i1.ll   |  48 +++--
 llvm/test/CodeGen/AMDGPU/rem_i128.ll          |  64 +++---
 llvm/test/CodeGen/AMDGPU/shift-i128.ll        |  72 +++----
 llvm/test/CodeGen/PowerPC/pr40922.ll          |   7 +-
 llvm/test/CodeGen/RISCV/pr84653_pr85190.ll    |  28 +--
 llvm/test/CodeGen/SystemZ/pr60413.ll          | 177 +++++++----------
 llvm/test/CodeGen/Thumb2/csel-andor-onebit.ll |   8 +-
 llvm/test/CodeGen/VE/Scalar/max.ll            |   2 +
 llvm/test/CodeGen/VE/Scalar/min.ll            |   2 +
 llvm/test/CodeGen/X86/avx512-logic.ll         |  12 +-
 llvm/test/CodeGen/X86/avx512-vec-cmp.ll       |   8 +-
 llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll     |   8 +-
 llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll   |  16 +-
 llvm/test/CodeGen/X86/avx512vl-logic.ll       |  24 +--
 llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll     |  56 +++---
 .../div-rem-pair-recomposition-unsigned.ll    |  51 +++--
 llvm/test/CodeGen/X86/fcmp-logic.ll           |  12 +-
 llvm/test/CodeGen/X86/fold-select.ll          |  10 +-
 llvm/test/CodeGen/X86/pr64589.ll              |   4 +-
 .../test/CodeGen/X86/vector-compare-all_of.ll |  37 +++-
 .../test/CodeGen/X86/vector-compare-any_of.ll |  29 ++-
 29 files changed, 489 insertions(+), 523 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 87033e824aa593..226a01fa0d31dc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11307,28 +11307,34 @@ static SDValue foldBoolSelectToLogic(SDNode *N, SelectionDAG &DAG) {
   if (VT != Cond.getValueType() || VT.getScalarSizeInBits() != 1)
     return SDValue();
 
-  // select Cond, Cond, F --> or Cond, F
-  // select Cond, 1, F    --> or Cond, F
+  auto FreezeIfNeeded = [&](SDValue V) {
+    if (!DAG.isGuaranteedNotToBePoison(V))
+      return DAG.getFreeze(V);
+    return V;
+  };
+
+  // select Cond, Cond, F --> or Cond, freeze(F)
+  // select Cond, 1, F    --> or Cond, freeze(F)
   if (Cond == T || isOneOrOneSplat(T, /* AllowUndefs */ true))
-    return matcher.getNode(ISD::OR, SDLoc(N), VT, Cond, F);
+    return matcher.getNode(ISD::OR, SDLoc(N), VT, Cond, FreezeIfNeeded(F));
 
   // select Cond, T, Cond --> and Cond, T
   // select Cond, T, 0    --> and Cond, T
   if (Cond == F || isNullOrNullSplat(F, /* AllowUndefs */ true))
-    return matcher.getNode(ISD::AND, SDLoc(N), VT, Cond, T);
+    return matcher.getNode(ISD::AND, SDLoc(N), VT, Cond, FreezeIfNeeded(T));
 
   // select Cond, T, 1 --> or (not Cond), T
   if (isOneOrOneSplat(F, /* AllowUndefs */ true)) {
     SDValue NotCond = matcher.getNode(ISD::XOR, SDLoc(N), VT, Cond,
                                       DAG.getAllOnesConstant(SDLoc(N), VT));
-    return matcher.getNode(ISD::OR, SDLoc(N), VT, NotCond, T);
+    return matcher.getNode(ISD::OR, SDLoc(N), VT, NotCond, FreezeIfNeeded(T));
   }
 
   // select Cond, 0, F --> and (not Cond), F
   if (isNullOrNullSplat(T, /* AllowUndefs */ true)) {
     SDValue NotCond = matcher.getNode(ISD::XOR, SDLoc(N), VT, Cond,
                                       DAG.getAllOnesConstant(SDLoc(N), VT));
-    return matcher.getNode(ISD::AND, SDLoc(N), VT, NotCond, F);
+    return matcher.getNode(ISD::AND, SDLoc(N), VT, NotCond, FreezeIfNeeded(F));
   }
 
   return SDValue();
@@ -11357,12 +11363,18 @@ static SDValue foldVSelectToSignBitSplatMask(SDNode *N, SelectionDAG &DAG) {
   else
     return SDValue();
 
+  auto FreezeIfNeeded = [&](SDValue V) {
+    if (!DAG.isGuaranteedNotToBePoison(V))
+      return DAG.getFreeze(V);
+    return V;
+  };
+
   // (Cond0 s< 0) ? N1 : 0 --> (Cond0 s>> BW-1) & N1
   if (isNullOrNullSplat(N2)) {
     SDLoc DL(N);
     SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT);
     SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Cond0, ShiftAmt);
-    return DAG.getNode(ISD::AND, DL, VT, Sra, N1);
+    return DAG.getNode(ISD::AND, DL, VT, Sra, FreezeIfNeeded(N1));
   }
 
   // (Cond0 s< 0) ? -1 : N2 --> (Cond0 s>> BW-1) | N2
@@ -11370,7 +11382,7 @@ static SDValue foldVSelectToSignBitSplatMask(SDNode *N, SelectionDAG &DAG) {
     SDLoc DL(N);
     SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT);
     SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Cond0, ShiftAmt);
-    return DAG.getNode(ISD::OR, DL, VT, Sra, N2);
+    return DAG.getNode(ISD::OR, DL, VT, Sra, FreezeIfNeeded(N2));
   }
 
   // If we have to invert the sign bit mask, only do that transform if the
@@ -11382,7 +11394,7 @@ static SDValue foldVSelectToSignBitSplatMask(SDNode *N, SelectionDAG &DAG) {
     SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT);
     SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Cond0, ShiftAmt);
     SDValue Not = DAG.getNOT(DL, Sra, VT);
-    return DAG.getNode(ISD::AND, DL, VT, Not, N2);
+    return DAG.getNode(ISD::AND, DL, VT, Not, FreezeIfNeeded(N2));
   }
 
   // TODO: There's another pattern in this family, but it may require
diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index 1d9f39e5185939..8cb525f14cc813 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -6,21 +6,14 @@
 
 ; (x0 < x1) && (x2 > x3)
 define i32 @cmp_and2(i32 %0, i32 %1, i32 %2, i32 %3) {
-; SDISEL-LABEL: cmp_and2:
-; SDISEL:       // %bb.0:
-; SDISEL-NEXT:    cmp w0, w1
-; SDISEL-NEXT:    ccmp w2, w3, #0, lo
-; SDISEL-NEXT:    cset w0, hi
-; SDISEL-NEXT:    ret
-;
-; GISEL-LABEL: cmp_and2:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    cmp w0, w1
-; GISEL-NEXT:    cset w8, lo
-; GISEL-NEXT:    cmp w2, w3
-; GISEL-NEXT:    cset w9, hi
-; GISEL-NEXT:    and w0, w8, w9
-; GISEL-NEXT:    ret
+; CHECK-LABEL: cmp_and2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
   %5 = icmp ult i32 %0, %1
   %6 = icmp ugt i32 %2, %3
   %7 = select i1 %5, i1 %6, i1 false
@@ -30,25 +23,17 @@ define i32 @cmp_and2(i32 %0, i32 %1, i32 %2, i32 %3) {
 
 ; (x0 < x1) && (x2 > x3) && (x4 != x5)
 define i32 @cmp_and3(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
-; SDISEL-LABEL: cmp_and3:
-; SDISEL:       // %bb.0:
-; SDISEL-NEXT:    cmp w0, w1
-; SDISEL-NEXT:    ccmp w2, w3, #0, lo
-; SDISEL-NEXT:    ccmp w4, w5, #4, hi
-; SDISEL-NEXT:    cset w0, ne
-; SDISEL-NEXT:    ret
-;
-; GISEL-LABEL: cmp_and3:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    cmp w0, w1
-; GISEL-NEXT:    cset w8, lo
-; GISEL-NEXT:    cmp w2, w3
-; GISEL-NEXT:    cset w9, hi
-; GISEL-NEXT:    cmp w4, w5
-; GISEL-NEXT:    and w8, w8, w9
-; GISEL-NEXT:    cset w9, ne
-; GISEL-NEXT:    and w0, w8, w9
-; GISEL-NEXT:    ret
+; CHECK-LABEL: cmp_and3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    cmp w4, w5
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
   %7 = icmp ult i32 %0, %1
   %8 = icmp ugt i32 %2, %3
   %9 = select i1 %7, i1 %8, i1 false
@@ -60,29 +45,20 @@ define i32 @cmp_and3(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
 
 ; (x0 < x1) && (x2 > x3) && (x4 != x5) && (x6 == x7)
 define i32 @cmp_and4(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) {
-; SDISEL-LABEL: cmp_and4:
-; SDISEL:       // %bb.0:
-; SDISEL-NEXT:    cmp w2, w3
-; SDISEL-NEXT:    ccmp w0, w1, #2, hi
-; SDISEL-NEXT:    ccmp w4, w5, #4, lo
-; SDISEL-NEXT:    ccmp w6, w7, #0, ne
-; SDISEL-NEXT:    cset w0, eq
-; SDISEL-NEXT:    ret
-;
-; GISEL-LABEL: cmp_and4:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    cmp w2, w3
-; GISEL-NEXT:    cset w8, hi
-; GISEL-NEXT:    cmp w0, w1
-; GISEL-NEXT:    cset w9, lo
-; GISEL-NEXT:    cmp w4, w5
-; GISEL-NEXT:    cset w10, ne
-; GISEL-NEXT:    cmp w6, w7
-; GISEL-NEXT:    and w8, w8, w9
-; GISEL-NEXT:    cset w11, eq
-; GISEL-NEXT:    and w9, w10, w11
-; GISEL-NEXT:    and w0, w8, w9
-; GISEL-NEXT:    ret
+; CHECK-LABEL: cmp_and4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w8, hi
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w9, lo
+; CHECK-NEXT:    cmp w4, w5
+; CHECK-NEXT:    cset w10, ne
+; CHECK-NEXT:    cmp w6, w7
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    cset w11, eq
+; CHECK-NEXT:    and w9, w10, w11
+; CHECK-NEXT:    and w0, w8, w9
+; CHECK-NEXT:    ret
   %9 = icmp ugt i32 %2, %3
   %10 = icmp ult i32 %0, %1
   %11 = select i1 %9, i1 %10, i1 false
@@ -96,22 +72,15 @@ define i32 @cmp_and4(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32
 
 ; (x0 < x1) || (x2 > x3)
 define i32 @cmp_or2(i32 %0, i32 %1, i32 %2, i32 %3) {
-; SDISEL-LABEL: cmp_or2:
-; SDISEL:       // %bb.0:
-; SDISEL-NEXT:    cmp w0, w1
-; SDISEL-NEXT:    ccmp w2, w3, #0, hs
-; SDISEL-NEXT:    cset w0, ne
-; SDISEL-NEXT:    ret
-;
-; GISEL-LABEL: cmp_or2:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    cmp w0, w1
-; GISEL-NEXT:    cset w8, lo
-; GISEL-NEXT:    cmp w2, w3
-; GISEL-NEXT:    cset w9, ne
-; GISEL-NEXT:    orr w8, w8, w9
-; GISEL-NEXT:    and w0, w8, #0x1
-; GISEL-NEXT:    ret
+; CHECK-LABEL: cmp_or2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
   %5 = icmp ult i32 %0, %1
   %6 = icmp ne i32 %2, %3
   %7 = select i1 %5, i1 true, i1 %6
@@ -121,26 +90,18 @@ define i32 @cmp_or2(i32 %0, i32 %1, i32 %2, i32 %3) {
 
 ; (x0 < x1) || (x2 > x3) || (x4 != x5)
 define i32 @cmp_or3(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
-; SDISEL-LABEL: cmp_or3:
-; SDISEL:       // %bb.0:
-; SDISEL-NEXT:    cmp w0, w1
-; SDISEL-NEXT:    ccmp w2, w3, #2, hs
-; SDISEL-NEXT:    ccmp w4, w5, #0, ls
-; SDISEL-NEXT:    cset w0, ne
-; SDISEL-NEXT:    ret
-;
-; GISEL-LABEL: cmp_or3:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    cmp w0, w1
-; GISEL-NEXT:    cset w8, lo
-; GISEL-NEXT:    cmp w2, w3
-; GISEL-NEXT:    cset w9, hi
-; GISEL-NEXT:    cmp w4, w5
-; GISEL-NEXT:    orr w8, w8, w9
-; GISEL-NEXT:    cset w9, ne
-; GISEL-NEXT:    orr w8, w8, w9
-; GISEL-NEXT:    and w0, w8, #0x1
-; GISEL-NEXT:    ret
+; CHECK-LABEL: cmp_or3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    cmp w4, w5
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
   %7 = icmp ult i32 %0, %1
   %8 = icmp ugt i32 %2, %3
   %9 = select i1 %7, i1 true, i1 %8
@@ -152,30 +113,21 @@ define i32 @cmp_or3(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
 
 ; (x0 < x1) || (x2 > x3) || (x4 != x5) || (x6 == x7)
 define i32 @cmp_or4(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) {
-; SDISEL-LABEL: cmp_or4:
-; SDISEL:       // %bb.0:
-; SDISEL-NEXT:    cmp w0, w1
-; SDISEL-NEXT:    ccmp w2, w3, #2, hs
-; SDISEL-NEXT:    ccmp w4, w5, #0, ls
-; SDISEL-NEXT:    ccmp w6, w7, #4, eq
-; SDISEL-NEXT:    cset w0, eq
-; SDISEL-NEXT:    ret
-;
-; GISEL-LABEL: cmp_or4:
-; GISEL:       // %bb.0:
-; GISEL-NEXT:    cmp w0, w1
-; GISEL-NEXT:    cset w8, lo
-; GISEL-NEXT:    cmp w2, w3
-; GISEL-NEXT:    cset w9, hi
-; GISEL-NEXT:    cmp w4, w5
-; GISEL-NEXT:    cset w10, ne
-; GISEL-NEXT:    cmp w6, w7
-; GISEL-NEXT:    orr w8, w8, w9
-; GISEL-NEXT:    cset w11, eq
-; GISEL-NEXT:    orr w9, w10, w11
-; GISEL-NEXT:    orr w8, w8, w9
-; GISEL-NEXT:    and w0, w8, #0x1
-; GISEL-NEXT:    ret
+; CHECK-LABEL: cmp_or4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    cset w8, lo
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, hi
+; CHECK-NEXT:    cmp w4, w5
+; CHECK-NEXT:    cset w10, ne
+; CHECK-NEXT:    cmp w6, w7
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    cset w11, eq
+; CHECK-NEXT:    orr w9, w10, w11
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    and w0, w8, #0x1
+; CHECK-NEXT:    ret
   %9 = icmp ult i32 %0, %1
   %10 = icmp ugt i32 %2, %3
   %11 = select i1 %9, i1 true, i1 %10
@@ -242,5 +194,3 @@ define i32 @true_or3(i32 %0, i32 %1, i32 %2) {
   %9 = zext i1 %8 to i32
   ret i32 %9
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
index 467c3c254fc2d3..5ee9e89c34210b 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
@@ -236,7 +236,8 @@ define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, pt
 ; CHECK-NEXT:    mov z7.d, z0.d
 ; CHECK-NEXT:    add x9, x9, x11
 ; CHECK-NEXT:    add x8, x8, x12
-; CHECK-NEXT:    cmpne p1.d, p1/z, z2.d, #0
+; CHECK-NEXT:    cmpne p2.d, p0/z, z2.d, #0
+; CHECK-NEXT:    and p1.b, p1/z, p1.b, p2.b
 ; CHECK-NEXT:    zip2 p3.d, p1.d, p1.d
 ; CHECK-NEXT:    zip1 p2.d, p1.d, p1.d
 ; CHECK-NEXT:    whilelo p1.d, x9, x10
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-select.ll b/llvm/test/CodeGen/AArch64/fast-isel-select.ll
index 6ad4a5ae572e0e..4ef4ee41e8aebe 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-select.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-select.ll
@@ -1,6 +1,6 @@
-; RUN: llc -mtriple=aarch64-apple-darwin                             -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-apple-darwin -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefix=GISEL
+; RUN: llc -mtriple=aarch64-apple-darwin                             -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,SISEL
+; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,FISEL
+; RUN: llc -mtriple=aarch64-apple-darwin -global-isel -verify-machineinstrs < %s | FileCheck %s --check-prefixes=GISEL
 
 ; First test the different supported value types for select.
 define zeroext i1 @select_i1(i1 zeroext %c, i1 zeroext %a, i1 zeroext %b) {
@@ -295,22 +295,28 @@ define float @select_icmp_sle(i32 %x, i32 %y, float %a, float %b) {
 ; Test peephole optimizations for select.
 define zeroext i1 @select_opt1(i1 zeroext %c, i1 zeroext %a) {
 ; CHECK-LABEL: select_opt1
-; CHECK:       orr {{w[0-9]+}}, w0, w1
+; SISEL:       orr [[REG:w[0-9]+]], w0, w1
+; SISEL:       and w0, [[REG]], #0x1
+; FISEL:       orr {{w[0-9]+}}, w0, w1
   %1 = select i1 %c, i1 true, i1 %a
   ret i1 %1
 }
 
 define zeroext i1 @select_opt2(i1 zeroext %c, i1 zeroext %a) {
 ; CHECK-LABEL: select_opt2
-; CHECK:       eor [[REG:w[0-9]+]], w0, #0x1
-; CHECK:       orr {{w[0-9]+}}, [[REG]], w1
+; SISEL:       orn [[REG:w[0-9]+]], w1, w0
+; SISEL:       and w0, [[REG]], #0x1
+; FISEL:       eor [[REG:w[0-9]+]], w0, #0x1
+; FISEL:       orr {{w[0-9]+}}, [[REG]], w1
   %1 = select i1 %c, i1 %a, i1 true
   ret i1 %1
 }
 
 define zeroext i1 @select_opt3(i1 zeroext %c, i1 zeroext %a) {
 ; CHECK-LABEL: select_opt3
-; CHECK:       bic {{w[0-9]+}}, w1, w0
+; SISEL:       eor [[REG:w[0-9]+]], w0, #0x1
+; SISEL:       and w0, [[REG]], w1
+; FISEL:       bic {{w[0-9]+}}, w1, w0
   %1 = select i1 %c, i1 false, i1 %a
   ret i1 %1
 }
diff --git a/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll b/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll
index 1a4ab6ab334a64..fadb0a3379e4e8 100644
--- a/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll
+++ b/llvm/test/CodeGen/AArch64/intrinsic-cttz-elts-sve.ll
@@ -213,7 +213,8 @@ define i32 @ctz_and_nxv16i1(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vsca
 ; CHECK-LABEL: ctz_and_nxv16i1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, z1.b
+; CHECK-NEXT:    cmpne p2.b, p1/z, z0.b, z1.b
+; CHECK-NEXT:    and p0.b, p0/z, p0.b, p2.b
 ; CHECK-NEXT:    brkb p0.b, p1/z, p0.b
 ; CHECK-NEXT:    cntp x0, p0, p0.b
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
diff --git a/llvm/test/CodeGen/AArch64/select-with-and-or.ll b/llvm/test/CodeGen/AArch64/select-with-and-or.ll
index 84b6818eaa739c..1fdb9d34bf1024 100644
--- a/llvm/test/CodeGen/AArch64/select-with-and-or.ll
+++ b/llvm/test/CodeGen/AArch64/select-with-and-or.ll
@@ -5,8 +5,10 @@ define i1 @and(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-LABEL: and:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp w0, w1
-; CHECK-NEXT:    ccmp w2, w3, #4, eq
-; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
   %a = icmp eq i32 %x, %y
   %b = icmp sgt i32 %z, %w
@@ -18,8 +20,11 @@ define i1 @or(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-LABEL: or:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp w0, w1
-; CHECK-NEXT:    ccmp w2, w3, #0, ne
-; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
   %a = icmp eq i32 %x, %y
   %b = icmp sgt i32 %z, %w
@@ -31,8 +36,10 @@ define i1 @and_not(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-LABEL: and_not:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp w0, w1
-; CHECK-NEXT:    ccmp w2, w3, #4, ne
-; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
   %a = icmp eq i32 %x, %y
   %b = icmp sgt i32 %z, %w
@@ -44,8 +51,11 @@ define i1 @or_not(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-LABEL: or_not:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp w0, w1
-; CHECK-NEXT:    ccmp w2, w3, #0, eq
-; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w2, w3
+; CHECK-NEXT:    cset w9, gt
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
   %a = icmp eq i32 %x, %y
   %b = icmp sgt i32 %z, %w
diff --git a/llvm/test/CodeGen/AArch64/sve-fp-int-min-max.ll b/llvm/test/CodeGen/AArch64/sve-fp-int-min-max.ll
index 5ff9f0f0df62f8..1d7ec72ccf27bb 100644
--- a/llvm/test/CodeGen/AArch64/sve-fp-int-min-max.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fp-int-min-max.ll
@@ -24,7 +24,8 @@ define i64 @scalable_int_min_max(ptr %arg, ptr %arg1, <vscale x 2 x ptr> %i37, <
 ; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z4.s
 ; CHECK-NEXT:    fcmge p2.s, p0/z, z0.s, z3.s
 ; CHECK-NEXT:    add z0.d, z2.d, z1.d
-; CHECK-NEXT:    bic p2.b, p1/z, p1.b, p2.b
+; CHECK-NEXT:    not p2.b, p0/z, p2.b
+; CHECK-NEXT:    and p2.b, p1/z, p1.b, p2.b
 ; CHECK-NEXT:    mov z0.d, p2/m, z2.d
 ; CHECK-NEXT:    sel z0.d, p1, z0.d, z2.d
 ; CHECK-NEXT:    uaddv d0, p0, z0.d
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 2f3d5d9d140c2c..0ae92e1b5bbe03 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -467,28 +467,21 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v9
+; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v9
-; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[8:9], s[6:7]
 ; GFX9-O0-NEXT:    s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
+; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[5:6], s[12:13]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v10, 0, 1, s[14:15]
-; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
+; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[8:9], s[6:7]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v7, 0, 1, s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v7, v7, v10, s[8:9]
 ; GFX9-O0-NEXT:    v_and_b32_e64 v7, 1, v7
@@ -499,7 +492,6 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v6
 ; GFX9-O0-NEXT:    s_mov_b32 s14, s13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v7, v7, s14
-; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT:    ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v5, v5, s12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
@@ -1033,10 +1025,10 @@ define i128 @v_sdiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 s[6:7], 1
 ; GFX9-O0-NEXT:    s_mov_b32 s5, s6
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
@@ -1586,28 +1578,21 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v9
+; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v9
-; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[8:9], s[6:7]
 ; GFX9-O0-NEXT:    s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
+; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[5:6], s[12:13]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v10, 0, 1, s[14:15]
-; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
+; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[8:9], s[6:7]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v7, 0, 1, s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v7, v7, v10, s[8:9]
 ; GFX9-O0-NEXT:    v_and_b32_e64 v7, 1, v7
@@ -1618,7 +1603,6 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v6
 ; GFX9-O0-NEXT:    s_mov_b32 s14, s13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v7, v7, s14
-; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT:    ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v5, v5, s12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
@@ -2152,10 +2136,10 @@ define i128 @v_udiv_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 s[6:7], 1
 ; GFX9-O0-NEXT:    s_mov_b32 s5, s6
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll
index c3a6cd5975a779..eb1e601ba53b40 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll
@@ -15,13 +15,16 @@ define amdgpu_kernel void @uniform_trunc_i16_to_i1(ptr addrspace(1) %out, i16 %x
   ; GCN-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
   ; GCN-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY2]], %subreg.sub0, killed [[COPY1]], %subreg.sub1, killed [[S_MOV_B32_1]], %subreg.sub2, killed [[S_MOV_B32_]], %subreg.sub3
   ; GCN-NEXT:   [[S_SEXT_I32_I16_:%[0-9]+]]:sreg_32 = S_SEXT_I32_I16 [[S_LOAD_DWORD_IMM]]
-  ; GCN-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 65536, [[S_LOAD_DWORD_IMM]], implicit-def dead $scc
-  ; GCN-NEXT:   S_CMP_LG_U32 killed [[S_AND_B32_]], 0, implicit-def $scc
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:sreg_64 = COPY $scc
-  ; GCN-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
-  ; GCN-NEXT:   S_CMP_LT_I32 killed [[S_SEXT_I32_I16_]], killed [[S_MOV_B32_2]], implicit-def $scc
+  ; GCN-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+  ; GCN-NEXT:   [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[S_LOAD_DWORD_IMM]], killed [[S_MOV_B32_2]], implicit-def dead $scc
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY killed [[S_LSHR_B32_]]
+  ; GCN-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
+  ; GCN-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
   ; GCN-NEXT:   [[COPY4:%[0-9]+]]:sreg_64 = COPY $scc
-  ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[COPY4]], killed [[COPY3]], implicit-def dead $scc
+  ; GCN-NEXT:   [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+  ; GCN-NEXT:   S_CMP_LT_I32 killed [[S_SEXT_I32_I16_]], killed [[S_MOV_B32_3]], implicit-def $scc
+  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:sreg_64 = COPY $scc
+  ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[COPY5]], killed [[COPY4]], implicit-def dead $scc
   ; GCN-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
   ; GCN-NEXT:   BUFFER_STORE_BYTE_OFFSET killed [[V_CNDMASK_B32_e64_]], killed [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (store (s8) into %ir.out.load, addrspace 1)
   ; GCN-NEXT:   S_ENDPGM 0
@@ -38,7 +41,8 @@ define i1 @divergent_trunc_i16_to_i1(ptr addrspace(1) %out, i16 %x, i1 %z) {
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY]], implicit $exec
+  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, killed [[COPY2]], implicit $exec
   ; GCN-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_AND_B32_e64_]], 1, implicit $exec
   ; GCN-NEXT:   [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[COPY1]], 0, 16, implicit $exec
   ; GCN-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
@@ -67,13 +71,14 @@ define amdgpu_kernel void @uniform_trunc_i32_to_i1(ptr addrspace(1) %out, i32 %x
   ; GCN-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY2]], %subreg.sub0, killed [[COPY1]], %subreg.sub1, killed [[S_MOV_B32_1]], %subreg.sub2, killed [[S_MOV_B32_]], %subreg.sub3
   ; GCN-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
   ; GCN-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM1]].sub1
-  ; GCN-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY4]], implicit-def dead $scc
+  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY killed [[COPY4]]
+  ; GCN-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY5]], implicit-def dead $scc
   ; GCN-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
-  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:sreg_64 = COPY $scc
+  ; GCN-NEXT:   [[COPY6:%[0-9]+]]:sreg_64 = COPY $scc
   ; GCN-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
   ; GCN-NEXT:   S_CMP_LT_I32 killed [[COPY3]], killed [[S_MOV_B32_2]], implicit-def $scc
-  ; GCN-NEXT:   [[COPY6:%[0-9]+]]:sreg_64 = COPY $scc
-  ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[COPY6]], killed [[COPY5]], implicit-def dead $scc
+  ; GCN-NEXT:   [[COPY7:%[0-9]+]]:sreg_64 = COPY $scc
+  ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[COPY7]], killed [[COPY6]], implicit-def dead $scc
   ; GCN-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
   ; GCN-NEXT:   BUFFER_STORE_BYTE_OFFSET killed [[V_CNDMASK_B32_e64_]], killed [[REG_SEQUENCE]], 0, 0, 0, 0, implicit $exec :: (store (s8) into %ir.out.load, addrspace 1)
   ; GCN-NEXT:   S_ENDPGM 0
@@ -90,7 +95,8 @@ define i1 @divergent_trunc_i32_to_i1(ptr addrspace(1) %out, i32 %x, i1 %z) {
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY]], implicit $exec
+  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, killed [[COPY2]], implicit $exec
   ; GCN-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_AND_B32_e64_]], 1, implicit $exec
   ; GCN-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
   ; GCN-NEXT:   [[V_CMP_LT_I32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I32_e64 [[COPY1]], killed [[S_MOV_B32_]], implicit $exec
@@ -122,13 +128,14 @@ define amdgpu_kernel void @uniform_trunc_i64_to_i1(ptr addrspace(1) %out, i64 %x
   ; GCN-NEXT:   [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX4_IMM]].sub3
   ; GCN-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX4_IMM]].sub2
   ; GCN-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[COPY6]], %subreg.sub0, killed [[COPY5]], %subreg.sub1
-  ; GCN-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[S_LOAD_DWORD_IMM]], implicit-def dead $scc
+  ; GCN-NEXT:   [[COPY7:%[0-9]+]]:sreg_32 = COPY killed [[S_LOAD_DWORD_IMM]]
+  ; GCN-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY7]], implicit-def dead $scc
   ; GCN-NEXT:   S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
-  ; GCN-NEXT:   [[COPY7:%[0-9]+]]:sreg_64 = COPY $scc
+  ; GCN-NEXT:   [[COPY8:%[0-9]+]]:sreg_64 = COPY $scc
   ; GCN-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
-  ; GCN-NEXT:   [[COPY8:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
-  ; GCN-NEXT:   [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
-  ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I64_e64_]], killed [[COPY7]], implicit-def dead $scc
+  ; GCN-NEXT:   [[COPY9:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
+  ; GCN-NEXT:   [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE2]], [[COPY9]], implicit $exec
+  ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I64_e64_]], killed [[COPY8]], implicit-def dead $scc
   ; GCN-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
   ; GCN-NEXT:   BUFFER_STORE_BYTE_OFFSET killed [[V_CNDMASK_B32_e64_]], killed [[REG_SEQUENCE1]], 0, 0, 0, 0, implicit $exec :: (store (s8) into %ir.2, addrspace 1)
   ; GCN-NEXT:   S_ENDPGM 0
@@ -147,11 +154,12 @@ define i1 @divergent_trunc_i64_to_i1(ptr addrspace(1) %out, i64 %x, i1 %z) {
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
   ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
   ; GCN-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
-  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY]], implicit $exec
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, killed [[COPY3]], implicit $exec
   ; GCN-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_AND_B32_e64_]], 1, implicit $exec
   ; GCN-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
-  ; GCN-NEXT:   [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE]], [[COPY3]], implicit $exec
+  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
+  ; GCN-NEXT:   [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE]], [[COPY4]], implicit $exec
   ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I64_e64_]], killed [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
   ; GCN-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
   ; GCN-NEXT:   $vgpr0 = COPY [[V_CNDMASK_B32_e64_]]
diff --git a/llvm/test/CodeGen/AMDGPU/rem_i128.ll b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
index 6ba66ccf71868e..138db876056712 100644
--- a/llvm/test/CodeGen/AMDGPU/rem_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
@@ -501,28 +501,21 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v9
+; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v9
-; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[8:9], s[6:7]
 ; GFX9-O0-NEXT:    s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
+; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[5:6], s[12:13]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v10, 0, 1, s[14:15]
-; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
+; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[8:9], s[6:7]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v7, 0, 1, s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v7, v7, v10, s[8:9]
 ; GFX9-O0-NEXT:    v_and_b32_e64 v7, 1, v7
@@ -533,7 +526,6 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v6
 ; GFX9-O0-NEXT:    s_mov_b32 s14, s13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v7, v7, s14
-; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT:    ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v5, v5, s12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
@@ -1067,10 +1059,10 @@ define i128 @v_srem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 s[6:7], 1
 ; GFX9-O0-NEXT:    s_mov_b32 s5, s6
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
@@ -1899,28 +1891,21 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v9
+; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; implicit-def: $sgpr8
 ; GFX9-O0-NEXT:    ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v7
-; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v9
-; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v8
-; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v6
-; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v5
-; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_mov_b64 s[8:9], s[6:7]
-; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[8:9], s[8:9]
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_cmp_eq_u64_e64 s[8:9], v[8:9], s[6:7]
 ; GFX9-O0-NEXT:    s_mov_b64 s[12:13], 0x7f
-; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[12:13]
-; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[5:6], s[14:15]
+; GFX9-O0-NEXT:    v_cmp_gt_u64_e64 s[14:15], v[5:6], s[12:13]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v10, 0, 1, s[14:15]
-; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[6:7]
-; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[8:9], s[14:15]
+; GFX9-O0-NEXT:    v_cmp_ne_u64_e64 s[14:15], v[8:9], s[6:7]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v7, 0, 1, s[14:15]
 ; GFX9-O0-NEXT:    v_cndmask_b32_e64 v7, v7, v10, s[8:9]
 ; GFX9-O0-NEXT:    v_and_b32_e64 v7, 1, v7
@@ -1931,7 +1916,6 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v6
 ; GFX9-O0-NEXT:    s_mov_b32 s14, s13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v7, v7, s14
-; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 killed $vgpr5_vgpr6 killed $exec
 ; GFX9-O0-NEXT:    ; kill: def $sgpr12 killed $sgpr12 killed $sgpr12_sgpr13
 ; GFX9-O0-NEXT:    v_xor_b32_e64 v5, v5, s12
 ; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
@@ -2465,10 +2449,10 @@ define i128 @v_urem_i128_vv(i128 %lhs, i128 %rhs) {
 ; GFX9-O0-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:44 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v12, off, s[0:3], s32 offset:48 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_mov_b64 s[6:7], 1
 ; GFX9-O0-NEXT:    s_mov_b32 s5, s6
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/shift-i128.ll b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
index 4b02d00ddce1ef..1906891b01fd74 100644
--- a/llvm/test/CodeGen/AMDGPU/shift-i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
@@ -285,15 +285,15 @@ define <2 x i128> @v_shl_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 64, v8
 ; GCN-NEXT:    v_lshr_b64 v[16:17], v[0:1], v16
 ; GCN-NEXT:    v_lshl_b64 v[18:19], v[2:3], v8
-; GCN-NEXT:    v_cmp_gt_u64_e32 vcc, 64, v[8:9]
+; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
 ; GCN-NEXT:    v_or_b32_e32 v18, v18, v16
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
+; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[8:9]
 ; GCN-NEXT:    v_subrev_i32_e64 v16, s[6:7], 64, v8
 ; GCN-NEXT:    v_or_b32_e32 v19, v19, v17
 ; GCN-NEXT:    v_lshl_b64 v[16:17], v[0:1], v16
 ; GCN-NEXT:    v_or_b32_e32 v11, v9, v11
 ; GCN-NEXT:    v_or_b32_e32 v10, v8, v10
-; GCN-NEXT:    s_and_b64 vcc, s[4:5], vcc
+; GCN-NEXT:    s_and_b64 vcc, vcc, s[4:5]
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
 ; GCN-NEXT:    v_cndmask_b32_e32 v16, v16, v18, vcc
 ; GCN-NEXT:    v_sub_i32_e64 v9, s[6:7], 64, v12
@@ -303,14 +303,14 @@ define <2 x i128> @v_shl_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    v_lshl_b64 v[16:17], v[6:7], v12
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s[4:5]
 ; GCN-NEXT:    v_or_b32_e32 v16, v16, v9
-; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[12:13]
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[14:15]
+; GCN-NEXT:    v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
 ; GCN-NEXT:    v_subrev_i32_e64 v9, s[8:9], 64, v12
 ; GCN-NEXT:    v_or_b32_e32 v11, v17, v10
 ; GCN-NEXT:    v_lshl_b64 v[9:10], v[4:5], v9
 ; GCN-NEXT:    v_or_b32_e32 v15, v13, v15
 ; GCN-NEXT:    v_or_b32_e32 v14, v12, v14
-; GCN-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
+; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
 ; GCN-NEXT:    v_cndmask_b32_e64 v9, v9, v16, s[4:5]
 ; GCN-NEXT:    v_lshl_b64 v[0:1], v[0:1], v8
@@ -334,15 +334,15 @@ define <2 x i128> @v_lshr_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 64, v8
 ; GCN-NEXT:    v_lshl_b64 v[16:17], v[2:3], v16
 ; GCN-NEXT:    v_lshr_b64 v[18:19], v[0:1], v8
-; GCN-NEXT:    v_cmp_gt_u64_e32 vcc, 64, v[8:9]
+; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
 ; GCN-NEXT:    v_or_b32_e32 v18, v18, v16
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
+; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[8:9]
 ; GCN-NEXT:    v_subrev_i32_e64 v16, s[6:7], 64, v8
 ; GCN-NEXT:    v_or_b32_e32 v19, v19, v17
 ; GCN-NEXT:    v_lshr_b64 v[16:17], v[2:3], v16
 ; GCN-NEXT:    v_or_b32_e32 v11, v9, v11
 ; GCN-NEXT:    v_or_b32_e32 v10, v8, v10
-; GCN-NEXT:    s_and_b64 vcc, s[4:5], vcc
+; GCN-NEXT:    s_and_b64 vcc, vcc, s[4:5]
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
 ; GCN-NEXT:    v_cndmask_b32_e32 v16, v16, v18, vcc
 ; GCN-NEXT:    v_sub_i32_e64 v9, s[6:7], 64, v12
@@ -352,14 +352,14 @@ define <2 x i128> @v_lshr_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    v_lshr_b64 v[16:17], v[4:5], v12
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v11, v1, s[4:5]
 ; GCN-NEXT:    v_or_b32_e32 v16, v16, v9
-; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[12:13]
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[14:15]
+; GCN-NEXT:    v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
 ; GCN-NEXT:    v_subrev_i32_e64 v9, s[8:9], 64, v12
 ; GCN-NEXT:    v_or_b32_e32 v11, v17, v10
 ; GCN-NEXT:    v_lshr_b64 v[9:10], v[6:7], v9
 ; GCN-NEXT:    v_or_b32_e32 v15, v13, v15
 ; GCN-NEXT:    v_or_b32_e32 v14, v12, v14
-; GCN-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
+; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
 ; GCN-NEXT:    v_cndmask_b32_e64 v9, v9, v16, s[4:5]
 ; GCN-NEXT:    v_lshr_b64 v[2:3], v[2:3], v8
@@ -383,15 +383,15 @@ define <2 x i128> @v_ashr_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 64, v8
 ; GCN-NEXT:    v_lshl_b64 v[16:17], v[2:3], v16
 ; GCN-NEXT:    v_lshr_b64 v[18:19], v[0:1], v8
-; GCN-NEXT:    v_cmp_gt_u64_e32 vcc, 64, v[8:9]
+; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
 ; GCN-NEXT:    v_or_b32_e32 v18, v18, v16
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
+; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[8:9]
 ; GCN-NEXT:    v_subrev_i32_e64 v16, s[6:7], 64, v8
 ; GCN-NEXT:    v_or_b32_e32 v19, v19, v17
 ; GCN-NEXT:    v_ashr_i64 v[16:17], v[2:3], v16
 ; GCN-NEXT:    v_or_b32_e32 v11, v9, v11
 ; GCN-NEXT:    v_or_b32_e32 v10, v8, v10
-; GCN-NEXT:    s_and_b64 vcc, s[4:5], vcc
+; GCN-NEXT:    s_and_b64 vcc, vcc, s[4:5]
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
 ; GCN-NEXT:    v_cndmask_b32_e32 v16, v16, v18, vcc
 ; GCN-NEXT:    v_sub_i32_e64 v9, s[6:7], 64, v12
@@ -401,14 +401,14 @@ define <2 x i128> @v_ashr_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    v_lshr_b64 v[16:17], v[4:5], v12
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v11, v1, s[4:5]
 ; GCN-NEXT:    v_or_b32_e32 v16, v16, v9
-; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[12:13]
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[14:15]
+; GCN-NEXT:    v_cmp_gt_u64_e64 s[6:7], 64, v[12:13]
 ; GCN-NEXT:    v_subrev_i32_e64 v9, s[8:9], 64, v12
 ; GCN-NEXT:    v_or_b32_e32 v11, v17, v10
 ; GCN-NEXT:    v_ashr_i64 v[9:10], v[6:7], v9
 ; GCN-NEXT:    v_or_b32_e32 v15, v13, v15
 ; GCN-NEXT:    v_or_b32_e32 v14, v12, v14
-; GCN-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
+; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
 ; GCN-NEXT:    v_cndmask_b32_e64 v9, v9, v16, s[4:5]
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v9, v4, s[6:7]
@@ -436,12 +436,12 @@ define amdgpu_kernel void @s_shl_v2i128ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[16:17], s[8:9], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[10:11], 0
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[18:19], s[8:9], 64
 ; GCN-NEXT:    s_sub_i32 s22, 64, s8
 ; GCN-NEXT:    s_sub_i32 s20, s8, 64
 ; GCN-NEXT:    s_lshr_b64 s[22:23], s[0:1], s22
-; GCN-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
+; GCN-NEXT:    s_and_b64 s[16:17], s[16:17], s[18:19]
 ; GCN-NEXT:    s_lshl_b64 s[18:19], s[2:3], s8
 ; GCN-NEXT:    s_lshl_b64 s[20:21], s[0:1], s20
 ; GCN-NEXT:    s_or_b64 s[18:19], s[18:19], s[22:23]
@@ -454,10 +454,10 @@ define amdgpu_kernel void @s_shl_v2i128ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    s_and_b64 s[22:23], s[16:17], exec
 ; GCN-NEXT:    s_cselect_b32 s3, s18, s20
 ; GCN-NEXT:    s_and_b64 s[10:11], s[10:11], exec
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[10:11], s[12:13], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 0
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[14:15], 0
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[18:19], s[12:13], 64
 ; GCN-NEXT:    s_cselect_b32 s22, s2, s3
-; GCN-NEXT:    s_and_b64 s[2:3], s[18:19], s[10:11]
+; GCN-NEXT:    s_and_b64 s[2:3], s[10:11], s[18:19]
 ; GCN-NEXT:    s_sub_i32 s18, 64, s12
 ; GCN-NEXT:    s_sub_i32 s10, s12, 64
 ; GCN-NEXT:    s_lshr_b64 s[18:19], s[4:5], s18
@@ -508,12 +508,12 @@ define amdgpu_kernel void @s_lshr_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[16:17], s[8:9], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[10:11], 0
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[18:19], s[8:9], 64
 ; GCN-NEXT:    s_sub_i32 s22, 64, s8
 ; GCN-NEXT:    s_sub_i32 s20, s8, 64
 ; GCN-NEXT:    s_lshl_b64 s[22:23], s[2:3], s22
-; GCN-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
+; GCN-NEXT:    s_and_b64 s[16:17], s[16:17], s[18:19]
 ; GCN-NEXT:    s_lshr_b64 s[18:19], s[0:1], s8
 ; GCN-NEXT:    s_lshr_b64 s[20:21], s[2:3], s20
 ; GCN-NEXT:    s_or_b64 s[18:19], s[18:19], s[22:23]
@@ -526,10 +526,10 @@ define amdgpu_kernel void @s_lshr_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    s_and_b64 s[22:23], s[16:17], exec
 ; GCN-NEXT:    s_cselect_b32 s1, s18, s20
 ; GCN-NEXT:    s_and_b64 s[10:11], s[10:11], exec
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[10:11], s[12:13], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 0
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[14:15], 0
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[18:19], s[12:13], 64
 ; GCN-NEXT:    s_cselect_b32 s22, s0, s1
-; GCN-NEXT:    s_and_b64 s[0:1], s[18:19], s[10:11]
+; GCN-NEXT:    s_and_b64 s[0:1], s[10:11], s[18:19]
 ; GCN-NEXT:    s_sub_i32 s18, 64, s12
 ; GCN-NEXT:    s_sub_i32 s10, s12, 64
 ; GCN-NEXT:    s_lshl_b64 s[18:19], s[6:7], s18
@@ -580,12 +580,12 @@ define amdgpu_kernel void @s_ashr_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[16:17], s[8:9], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[10:11], 0
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[18:19], s[8:9], 64
 ; GCN-NEXT:    s_sub_i32 s22, 64, s8
 ; GCN-NEXT:    s_sub_i32 s20, s8, 64
 ; GCN-NEXT:    s_lshl_b64 s[22:23], s[2:3], s22
-; GCN-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
+; GCN-NEXT:    s_and_b64 s[16:17], s[16:17], s[18:19]
 ; GCN-NEXT:    s_lshr_b64 s[18:19], s[0:1], s8
 ; GCN-NEXT:    s_ashr_i64 s[20:21], s[2:3], s20
 ; GCN-NEXT:    s_or_b64 s[18:19], s[18:19], s[22:23]
@@ -598,10 +598,10 @@ define amdgpu_kernel void @s_ashr_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
 ; GCN-NEXT:    s_and_b64 s[22:23], s[16:17], exec
 ; GCN-NEXT:    s_cselect_b32 s1, s18, s20
 ; GCN-NEXT:    s_and_b64 s[10:11], s[10:11], exec
-; GCN-NEXT:    v_cmp_lt_u64_e64 s[10:11], s[12:13], 64
-; GCN-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 0
+; GCN-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[14:15], 0
+; GCN-NEXT:    v_cmp_lt_u64_e64 s[18:19], s[12:13], 64
 ; GCN-NEXT:    s_cselect_b32 s22, s0, s1
-; GCN-NEXT:    s_and_b64 s[0:1], s[18:19], s[10:11]
+; GCN-NEXT:    s_and_b64 s[0:1], s[10:11], s[18:19]
 ; GCN-NEXT:    s_sub_i32 s18, 64, s12
 ; GCN-NEXT:    s_sub_i32 s10, s12, 64
 ; GCN-NEXT:    s_lshl_b64 s[18:19], s[6:7], s18
diff --git a/llvm/test/CodeGen/PowerPC/pr40922.ll b/llvm/test/CodeGen/PowerPC/pr40922.ll
index 9252e9a3e3aa4f..2d9add6a198579 100644
--- a/llvm/test/CodeGen/PowerPC/pr40922.ll
+++ b/llvm/test/CodeGen/PowerPC/pr40922.ll
@@ -23,11 +23,12 @@ define i32 @a() {
 ; CHECK-NEXT:    li 5, 0
 ; CHECK-NEXT:    mr 30, 3
 ; CHECK-NEXT:    addic 6, 4, 6
-; CHECK-NEXT:    addze 5, 5
 ; CHECK-NEXT:    rlwinm 6, 6, 0, 28, 26
-; CHECK-NEXT:    andi. 5, 5, 1
+; CHECK-NEXT:    addze 5, 5
 ; CHECK-NEXT:    cmplw 1, 6, 4
-; CHECK-NEXT:    crorc 20, 1, 4
+; CHECK-NEXT:    andi. 5, 5, 1
+; CHECK-NEXT:    crnot 20, 4
+; CHECK-NEXT:    cror 20, 1, 20
 ; CHECK-NEXT:    bc 12, 20, .LBB0_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    bl e
diff --git a/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll b/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll
index 3fa494e1a57ddc..a80379eab6100f 100644
--- a/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll
+++ b/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll
@@ -20,12 +20,13 @@ define i1 @pr84653(i32 %x) {
 ; CHECK-ZBB-LABEL: pr84653:
 ; CHECK-ZBB:       # %bb.0:
 ; CHECK-ZBB-NEXT:    sext.w a1, a0
-; CHECK-ZBB-NEXT:    lui a2, 524288
-; CHECK-ZBB-NEXT:    addi a2, a2, -1
-; CHECK-ZBB-NEXT:    xor a0, a0, a2
+; CHECK-ZBB-NEXT:    sgtz a2, a1
+; CHECK-ZBB-NEXT:    lui a3, 524288
+; CHECK-ZBB-NEXT:    addi a3, a3, -1
+; CHECK-ZBB-NEXT:    xor a0, a0, a3
 ; CHECK-ZBB-NEXT:    sext.w a0, a0
-; CHECK-ZBB-NEXT:    max a0, a0, zero
 ; CHECK-ZBB-NEXT:    slt a0, a0, a1
+; CHECK-ZBB-NEXT:    and a0, a2, a0
 ; CHECK-ZBB-NEXT:    ret
   %cmp1 = icmp sgt i32 %x, 0
   %sub = sub nsw i32 2147483647, %x  ; 0x7fffffff
@@ -51,10 +52,12 @@ define i1 @pr85190(i64 %a) {
 ; CHECK-ZBB-LABEL: pr85190:
 ; CHECK-ZBB:       # %bb.0:
 ; CHECK-ZBB-NEXT:    ori a1, a0, 7
-; CHECK-ZBB-NEXT:    li a2, -1
-; CHECK-ZBB-NEXT:    slli a2, a2, 63
-; CHECK-ZBB-NEXT:    sub a2, a2, a1
-; CHECK-ZBB-NEXT:    slt a0, a0, a2
+; CHECK-ZBB-NEXT:    slti a2, a0, 0
+; CHECK-ZBB-NEXT:    li a3, -1
+; CHECK-ZBB-NEXT:    slli a3, a3, 63
+; CHECK-ZBB-NEXT:    sub a3, a3, a1
+; CHECK-ZBB-NEXT:    slt a0, a0, a3
+; CHECK-ZBB-NEXT:    and a0, a2, a0
 ; CHECK-ZBB-NEXT:    ret
   %or = or i64 %a, 7
   %cmp1 = icmp slt i64 %a, 0
@@ -80,12 +83,13 @@ define i1 @select_to_or(i32 %x) {
 ; CHECK-ZBB-LABEL: select_to_or:
 ; CHECK-ZBB:       # %bb.0:
 ; CHECK-ZBB-NEXT:    sext.w a1, a0
-; CHECK-ZBB-NEXT:    lui a2, 524288
-; CHECK-ZBB-NEXT:    addi a2, a2, -1
-; CHECK-ZBB-NEXT:    xor a0, a0, a2
+; CHECK-ZBB-NEXT:    sgtz a2, a1
+; CHECK-ZBB-NEXT:    lui a3, 524288
+; CHECK-ZBB-NEXT:    addi a3, a3, -1
+; CHECK-ZBB-NEXT:    xor a0, a0, a3
 ; CHECK-ZBB-NEXT:    sext.w a0, a0
-; CHECK-ZBB-NEXT:    min a0, a0, zero
 ; CHECK-ZBB-NEXT:    slt a0, a0, a1
+; CHECK-ZBB-NEXT:    or a0, a2, a0
 ; CHECK-ZBB-NEXT:    ret
   %cmp1 = icmp sgt i32 %x, 0
   %sub = sub nsw i32 2147483647, %x  ; 0x7fffffff
diff --git a/llvm/test/CodeGen/SystemZ/pr60413.ll b/llvm/test/CodeGen/SystemZ/pr60413.ll
index 5a629567d07069..aae254359f334d 100644
--- a/llvm/test/CodeGen/SystemZ/pr60413.ll
+++ b/llvm/test/CodeGen/SystemZ/pr60413.ll
@@ -13,114 +13,91 @@ declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
 define dso_local void @m() local_unnamed_addr #1 {
 ; CHECK-LABEL: m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    stmg %r12, %r15, 96(%r15)
+; CHECK-NEXT:    stmg %r14, %r15, 112(%r15)
 ; CHECK-NEXT:    aghi %r15, -168
-; CHECK-NEXT:    llhrl %r2, f+4
-; CHECK-NEXT:    sll %r2, 8
-; CHECK-NEXT:    larl %r1, f
-; CHECK-NEXT:    ic %r2, 6(%r1)
-; CHECK-NEXT:    larl %r1, e
-; CHECK-NEXT:    lb %r0, 3(%r1)
-; CHECK-NEXT:    clfi %r2, 128
+; CHECK-NEXT:    lhrl %r1, f+4
+; CHECK-NEXT:    sll %r1, 8
+; CHECK-NEXT:    larl %r2, f
+; CHECK-NEXT:    ic %r1, 6(%r2)
+; CHECK-NEXT:    larl %r2, e
+; CHECK-NEXT:    lb %r0, 3(%r2)
+; CHECK-NEXT:    vlvgp %v0, %r1, %r1
+; CHECK-NEXT:    nilh %r1, 255
+; CHECK-NEXT:    chi %r1, 128
 ; CHECK-NEXT:    ipm %r1
 ; CHECK-NEXT:    risbg %r1, %r1, 63, 191, 36
-; CHECK-NEXT:    vlvgp %v1, %r2, %r0
-; CHECK-NEXT:    vlvgf %v1, %r2, 0
-; CHECK-NEXT:    vlvgf %v1, %r2, 2
-; CHECK-NEXT:    vlvgp %v0, %r0, %r2
-; CHECK-NEXT:    vlvgp %v2, %r2, %r2
-; CHECK-NEXT:    # kill: def $r2l killed $r2l killed $r2d
-; CHECK-NEXT:    nilh %r2, 255
-; CHECK-NEXT:    chi %r2, 128
-; CHECK-NEXT:    ipm %r2
-; CHECK-NEXT:    risbg %r2, %r2, 63, 191, 36
-; CHECK-NEXT:    vlvgf %v0, %r0, 0
-; CHECK-NEXT:    vlvgf %v0, %r0, 2
-; CHECK-NEXT:    vrepf %v2, %v2, 1
-; CHECK-NEXT:    vgbm %v3, 30583
-; CHECK-NEXT:    vn %v0, %v0, %v3
-; CHECK-NEXT:    vn %v1, %v1, %v3
-; CHECK-NEXT:    vn %v2, %v2, %v3
-; CHECK-NEXT:    vrepif %v3, 127
-; CHECK-NEXT:    vchlf %v1, %v1, %v3
-; CHECK-NEXT:    vlgvf %r12, %v1, 0
-; CHECK-NEXT:    vchlf %v2, %v2, %v3
-; CHECK-NEXT:    vlgvf %r4, %v2, 1
-; CHECK-NEXT:    nilf %r4, 1
-; CHECK-NEXT:    vlgvf %r5, %v2, 0
-; CHECK-NEXT:    risbg %r3, %r5, 48, 176, 15
-; CHECK-NEXT:    rosbg %r3, %r4, 32, 49, 14
-; CHECK-NEXT:    vlgvf %r14, %v2, 2
-; CHECK-NEXT:    nilf %r14, 1
-; CHECK-NEXT:    rosbg %r3, %r14, 32, 50, 13
-; CHECK-NEXT:    vlgvf %r13, %v2, 3
-; CHECK-NEXT:    nilf %r13, 1
-; CHECK-NEXT:    rosbg %r3, %r13, 32, 51, 12
-; CHECK-NEXT:    rosbg %r3, %r12, 52, 52, 11
-; CHECK-NEXT:    vlgvf %r12, %v1, 1
-; CHECK-NEXT:    rosbg %r3, %r12, 53, 53, 10
-; CHECK-NEXT:    vlgvf %r12, %v1, 2
-; CHECK-NEXT:    rosbg %r3, %r12, 54, 54, 9
-; CHECK-NEXT:    vlgvf %r12, %v1, 3
-; CHECK-NEXT:    rosbg %r3, %r12, 55, 55, 8
-; CHECK-NEXT:    vchlf %v0, %v0, %v3
-; CHECK-NEXT:    vlgvf %r12, %v0, 0
-; CHECK-NEXT:    rosbg %r3, %r12, 56, 56, 7
-; CHECK-NEXT:    vlgvf %r12, %v0, 1
-; CHECK-NEXT:    rosbg %r3, %r12, 57, 57, 6
-; CHECK-NEXT:    vlgvf %r12, %v0, 2
-; CHECK-NEXT:    rosbg %r3, %r12, 58, 58, 5
-; CHECK-NEXT:    vlgvf %r12, %v0, 3
-; CHECK-NEXT:    rosbg %r3, %r12, 59, 59, 4
+; CHECK-NEXT:    vrepf %v0, %v0, 1
+; CHECK-NEXT:    vgbm %v1, 30583
+; CHECK-NEXT:    vn %v0, %v0, %v1
+; CHECK-NEXT:    vrepif %v1, 127
+; CHECK-NEXT:    vchlf %v0, %v0, %v1
+; CHECK-NEXT:    vlgvf %r3, %v0, 1
+; CHECK-NEXT:    nilf %r3, 1
+; CHECK-NEXT:    vlgvf %r4, %v0, 0
+; CHECK-NEXT:    risbg %r2, %r4, 48, 176, 15
+; CHECK-NEXT:    rosbg %r2, %r3, 32, 49, 14
+; CHECK-NEXT:    vlgvf %r5, %v0, 2
 ; CHECK-NEXT:    nilf %r5, 1
-; CHECK-NEXT:    rosbg %r3, %r5, 32, 60, 3
-; CHECK-NEXT:    rosbg %r3, %r4, 32, 61, 2
-; CHECK-NEXT:    rosbg %r3, %r14, 32, 62, 1
-; CHECK-NEXT:    or %r3, %r13
-; CHECK-NEXT:    vlgvb %r5, %v0, 1
-; CHECK-NEXT:    vlgvb %r4, %v0, 0
-; CHECK-NEXT:    risbg %r4, %r4, 48, 176, 15
-; CHECK-NEXT:    rosbg %r4, %r5, 49, 49, 14
-; CHECK-NEXT:    vlgvb %r5, %v0, 2
-; CHECK-NEXT:    rosbg %r4, %r5, 50, 50, 13
-; CHECK-NEXT:    vlgvb %r5, %v0, 3
-; CHECK-NEXT:    rosbg %r4, %r5, 51, 51, 12
-; CHECK-NEXT:    vlgvb %r5, %v0, 4
-; CHECK-NEXT:    rosbg %r4, %r5, 52, 52, 11
-; CHECK-NEXT:    vlgvb %r5, %v0, 5
-; CHECK-NEXT:    rosbg %r4, %r5, 53, 53, 10
-; CHECK-NEXT:    vlgvb %r5, %v0, 6
-; CHECK-NEXT:    rosbg %r4, %r5, 54, 54, 9
-; CHECK-NEXT:    vlgvb %r5, %v0, 7
-; CHECK-NEXT:    rosbg %r4, %r5, 55, 55, 8
-; CHECK-NEXT:    vlgvb %r5, %v0, 8
-; CHECK-NEXT:    rosbg %r4, %r5, 56, 56, 7
-; CHECK-NEXT:    vlgvb %r5, %v0, 9
-; CHECK-NEXT:    rosbg %r4, %r5, 57, 57, 6
-; CHECK-NEXT:    vlgvb %r5, %v0, 10
-; CHECK-NEXT:    rosbg %r4, %r5, 58, 58, 5
-; CHECK-NEXT:    vlgvb %r5, %v0, 11
-; CHECK-NEXT:    rosbg %r4, %r5, 59, 59, 4
-; CHECK-NEXT:    vlgvb %r5, %v0, 12
-; CHECK-NEXT:    rosbg %r4, %r5, 60, 60, 3
-; CHECK-NEXT:    vlgvb %r5, %v0, 13
-; CHECK-NEXT:    rosbg %r4, %r5, 61, 61, 2
-; CHECK-NEXT:    vlgvb %r5, %v0, 14
-; CHECK-NEXT:    rosbg %r4, %r5, 62, 62, 1
-; CHECK-NEXT:    vlgvb %r5, %v0, 15
-; CHECK-NEXT:    rosbg %r4, %r5, 63, 63, 0
-; CHECK-NEXT:    xilf %r4, 4294967295
-; CHECK-NEXT:    or %r4, %r3
-; CHECK-NEXT:    tmll %r4, 65535
-; CHECK-NEXT:    ipm %r3
-; CHECK-NEXT:    afi %r3, -268435456
-; CHECK-NEXT:    srl %r3, 31
+; CHECK-NEXT:    rosbg %r2, %r5, 32, 50, 13
+; CHECK-NEXT:    vlgvf %r14, %v0, 3
+; CHECK-NEXT:    nilf %r14, 1
+; CHECK-NEXT:    rosbg %r2, %r14, 32, 51, 12
+; CHECK-NEXT:    nilf %r4, 1
+; CHECK-NEXT:    rosbg %r2, %r4, 32, 52, 11
+; CHECK-NEXT:    rosbg %r2, %r3, 32, 53, 10
+; CHECK-NEXT:    rosbg %r2, %r5, 32, 54, 9
+; CHECK-NEXT:    rosbg %r2, %r14, 32, 55, 8
+; CHECK-NEXT:    rosbg %r2, %r4, 32, 56, 7
+; CHECK-NEXT:    rosbg %r2, %r3, 32, 57, 6
+; CHECK-NEXT:    rosbg %r2, %r5, 32, 58, 5
+; CHECK-NEXT:    rosbg %r2, %r14, 32, 59, 4
+; CHECK-NEXT:    rosbg %r2, %r4, 32, 60, 3
+; CHECK-NEXT:    rosbg %r2, %r3, 32, 61, 2
+; CHECK-NEXT:    rosbg %r2, %r5, 32, 62, 1
+; CHECK-NEXT:    or %r2, %r14
+; CHECK-NEXT:    vlgvb %r4, %v0, 1
+; CHECK-NEXT:    vlgvb %r3, %v0, 0
+; CHECK-NEXT:    risbg %r3, %r3, 48, 176, 15
+; CHECK-NEXT:    rosbg %r3, %r4, 49, 49, 14
+; CHECK-NEXT:    vlgvb %r4, %v0, 2
+; CHECK-NEXT:    rosbg %r3, %r4, 50, 50, 13
+; CHECK-NEXT:    vlgvb %r4, %v0, 3
+; CHECK-NEXT:    rosbg %r3, %r4, 51, 51, 12
+; CHECK-NEXT:    vlgvb %r4, %v0, 4
+; CHECK-NEXT:    rosbg %r3, %r4, 52, 52, 11
+; CHECK-NEXT:    vlgvb %r4, %v0, 5
+; CHECK-NEXT:    rosbg %r3, %r4, 53, 53, 10
+; CHECK-NEXT:    vlgvb %r4, %v0, 6
+; CHECK-NEXT:    rosbg %r3, %r4, 54, 54, 9
+; CHECK-NEXT:    vlgvb %r4, %v0, 7
+; CHECK-NEXT:    rosbg %r3, %r4, 55, 55, 8
+; CHECK-NEXT:    vlgvb %r4, %v0, 8
+; CHECK-NEXT:    rosbg %r3, %r4, 56, 56, 7
+; CHECK-NEXT:    vlgvb %r4, %v0, 9
+; CHECK-NEXT:    rosbg %r3, %r4, 57, 57, 6
+; CHECK-NEXT:    vlgvb %r4, %v0, 10
+; CHECK-NEXT:    rosbg %r3, %r4, 58, 58, 5
+; CHECK-NEXT:    vlgvb %r4, %v0, 11
+; CHECK-NEXT:    rosbg %r3, %r4, 59, 59, 4
+; CHECK-NEXT:    vlgvb %r4, %v0, 12
+; CHECK-NEXT:    rosbg %r3, %r4, 60, 60, 3
+; CHECK-NEXT:    vlgvb %r4, %v0, 13
+; CHECK-NEXT:    rosbg %r3, %r4, 61, 61, 2
+; CHECK-NEXT:    vlgvb %r4, %v0, 14
+; CHECK-NEXT:    rosbg %r3, %r4, 62, 62, 1
+; CHECK-NEXT:    vlgvb %r4, %v0, 15
+; CHECK-NEXT:    rosbg %r3, %r4, 63, 63, 0
+; CHECK-NEXT:    xilf %r3, 4294967295
+; CHECK-NEXT:    or %r3, %r2
+; CHECK-NEXT:    tmll %r3, 65535
+; CHECK-NEXT:    ipm %r2
+; CHECK-NEXT:    afi %r2, -268435456
+; CHECK-NEXT:    srl %r2, 31
 ; CHECK-NEXT:    nr %r2, %r1
-; CHECK-NEXT:    nr %r2, %r3
 ; CHECK-NEXT:    nr %r2, %r0
 ; CHECK-NEXT:    larl %r1, g
 ; CHECK-NEXT:    stc %r2, 0(%r1)
-; CHECK-NEXT:    lmg %r12, %r15, 264(%r15)
+; CHECK-NEXT:    lmg %r14, %r15, 280(%r15)
 ; CHECK-NEXT:    br %r14
 entry:
   %n = alloca i32, align 4
diff --git a/llvm/test/CodeGen/Thumb2/csel-andor-onebit.ll b/llvm/test/CodeGen/Thumb2/csel-andor-onebit.ll
index 5219c74e35817c..2d13c244166aa1 100644
--- a/llvm/test/CodeGen/Thumb2/csel-andor-onebit.ll
+++ b/llvm/test/CodeGen/Thumb2/csel-andor-onebit.ll
@@ -179,10 +179,10 @@ define i32 @t5(i32 %f.0, i32 %call) {
 ; CHECK-NEXT:    cset r1, ne
 ; CHECK-NEXT:    cmp r0, #13
 ; CHECK-NEXT:    cset r0, eq
-; CHECK-NEXT:    and.w r2, r0, r1
-; CHECK-NEXT:    orrs r0, r1
-; CHECK-NEXT:    eor r0, r0, #1
-; CHECK-NEXT:    orrs r0, r2
+; CHECK-NEXT:    orr.w r2, r0, r1
+; CHECK-NEXT:    ands r0, r1
+; CHECK-NEXT:    orn r0, r0, r2
+; CHECK-NEXT:    and r0, r0, #1
 ; CHECK-NEXT:    bx lr
 entry:
   %tobool1.i = icmp ne i32 %call, 0
diff --git a/llvm/test/CodeGen/VE/Scalar/max.ll b/llvm/test/CodeGen/VE/Scalar/max.ll
index 12aa101cb48c4d..51da557c6c49fb 100644
--- a/llvm/test/CodeGen/VE/Scalar/max.ll
+++ b/llvm/test/CodeGen/VE/Scalar/max.ll
@@ -281,11 +281,13 @@ define zeroext i1 @maxi1(i1 zeroext, i1 zeroext) {
 ; CHECK-LABEL: maxi1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    or %s0, %s0, %s1
+; CHECK-NEXT:    and %s0, 1, %s0
 ; CHECK-NEXT:    b.l.t (, %s10)
 ;
 ; OPT-LABEL: maxi1:
 ; OPT:       # %bb.0:
 ; OPT-NEXT:    or %s0, %s0, %s1
+; OPT-NEXT:    and %s0, 1, %s0
 ; OPT-NEXT:    b.l.t (, %s10)
   %3 = xor i1 %1, true
   %4 = and i1 %3, %0
diff --git a/llvm/test/CodeGen/VE/Scalar/min.ll b/llvm/test/CodeGen/VE/Scalar/min.ll
index da92ebafd05903..69d5ce48601f8f 100644
--- a/llvm/test/CodeGen/VE/Scalar/min.ll
+++ b/llvm/test/CodeGen/VE/Scalar/min.ll
@@ -278,6 +278,7 @@ define i32 @min2u32(i32, i32) {
 define zeroext i1 @mini1(i1 zeroext, i1 zeroext) {
 ; CHECK-LABEL: mini1:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    and %s0, %s0, (32)0
 ; CHECK-NEXT:    and %s2, %s1, %s0
 ; CHECK-NEXT:    cmov.w.ne %s2, %s1, %s0
 ; CHECK-NEXT:    adds.w.zx %s0, %s2, (0)1
@@ -285,6 +286,7 @@ define zeroext i1 @mini1(i1 zeroext, i1 zeroext) {
 ;
 ; OPT-LABEL: mini1:
 ; OPT:       # %bb.0:
+; OPT-NEXT:    and %s0, %s0, (32)0
 ; OPT-NEXT:    and %s2, %s1, %s0
 ; OPT-NEXT:    cmov.w.ne %s2, %s1, %s0
 ; OPT-NEXT:    adds.w.zx %s0, %s2, (0)1
diff --git a/llvm/test/CodeGen/X86/avx512-logic.ll b/llvm/test/CodeGen/X86/avx512-logic.ll
index e53e194ba05c2a..ca79470d713493 100644
--- a/llvm/test/CodeGen/X86/avx512-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512-logic.ll
@@ -909,9 +909,9 @@ define <8 x i64> @ternlog_xor_and_mask(<8 x i64> %x, <8 x i64> %y) {
 define <16 x i32> @ternlog_maskz_or_and_mask(<16 x i32> %x, <16 x i32> %y, <16 x i32> %mask) {
 ; ALL-LABEL: ternlog_maskz_or_and_mask:
 ; ALL:       ## %bb.0:
-; ALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm3
-; ALL-NEXT:    vpsrad $31, %zmm2, %zmm0
-; ALL-NEXT:    vpternlogd $224, %zmm1, %zmm3, %zmm0
+; ALL-NEXT:    vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
+; ALL-NEXT:    vpsrad $31, %zmm2, %zmm1
+; ALL-NEXT:    vpandd %zmm0, %zmm1, %zmm0
 ; ALL-NEXT:    retq
   %m = icmp slt <16 x i32> %mask, zeroinitializer
   %a = and <16 x i32> %x, <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>
@@ -923,9 +923,9 @@ define <16 x i32> @ternlog_maskz_or_and_mask(<16 x i32> %x, <16 x i32> %y, <16 x
 define <8 x i64> @ternlog_maskz_xor_and_mask(<8 x i64> %x, <8 x i64> %y, <8 x i64> %mask) {
 ; ALL-LABEL: ternlog_maskz_xor_and_mask:
 ; ALL:       ## %bb.0:
-; ALL-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm3
-; ALL-NEXT:    vpsraq $63, %zmm2, %zmm0
-; ALL-NEXT:    vpternlogq $96, %zmm1, %zmm3, %zmm0
+; ALL-NEXT:    vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm0
+; ALL-NEXT:    vpsraq $63, %zmm2, %zmm1
+; ALL-NEXT:    vpandq %zmm0, %zmm1, %zmm0
 ; ALL-NEXT:    retq
   %m = icmp slt <8 x i64> %mask, zeroinitializer
   %a = and <8 x i64> %x, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
diff --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
index 86ebb1e40870f8..3826c9a32da20e 100644
--- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -396,8 +396,8 @@ define <16 x i32> @test19(<16 x i32> %x, <16 x i32> %x1, ptr %y.ptr) nounwind {
 define <16 x i32> @test20(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32> %y1) nounwind {
 ; CHECK-LABEL: test20:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf1,0x7d,0x48,0x76,0xc9]
-; CHECK-NEXT:    vpcmpeqd %zmm3, %zmm2, %k1 {%k1} ## encoding: [0x62,0xf1,0x6d,0x49,0x76,0xcb]
+; CHECK-NEXT:    vpcmpeqd %zmm3, %zmm2, %k1 ## encoding: [0x62,0xf1,0x6d,0x48,0x76,0xcb]
+; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0x76,0xc9]
 ; CHECK-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x49,0x64,0xc0]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask1 = icmp eq <16 x i32> %x1, %y1
@@ -410,8 +410,8 @@ define <16 x i32> @test20(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i3
 define <8 x i64> @test21(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1) nounwind {
 ; CHECK-LABEL: test21:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpcmpleq %zmm1, %zmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x48,0x1f,0xc9,0x02]
-; CHECK-NEXT:    vpcmpnltq %zmm3, %zmm2, %k1 {%k1} ## encoding: [0x62,0xf3,0xed,0x49,0x1f,0xcb,0x05]
+; CHECK-NEXT:    vpcmpnltq %zmm3, %zmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x48,0x1f,0xcb,0x05]
+; CHECK-NEXT:    vpcmpleq %zmm1, %zmm0, %k1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x49,0x1f,0xc9,0x02]
 ; CHECK-NEXT:    vpblendmq %zmm0, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xed,0x49,0x64,0xc0]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
   %mask1 = icmp sge <8 x i64> %x1, %y1
diff --git a/llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll
index 500a71b7dde277..c5cb858286e821 100644
--- a/llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll
@@ -96,8 +96,8 @@ define <32 x i16> @test8(<32 x i16> %x, <32 x i16> %x1, ptr %y.ptr) nounwind {
 define <32 x i16> @test9(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1, <32 x i16> %y1) nounwind {
 ; CHECK-LABEL: test9:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpcmpeqw %zmm1, %zmm0, %k1
-; CHECK-NEXT:    vpcmpeqw %zmm3, %zmm2, %k1 {%k1}
+; CHECK-NEXT:    vpcmpeqw %zmm3, %zmm2, %k1
+; CHECK-NEXT:    vpcmpeqw %zmm1, %zmm0, %k1 {%k1}
 ; CHECK-NEXT:    vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
   %mask1 = icmp eq <32 x i16> %x1, %y1
@@ -110,8 +110,8 @@ define <32 x i16> @test9(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1, <32 x i16
 define <64 x i8> @test10(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1, <64 x i8> %y1) nounwind {
 ; CHECK-LABEL: test10:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpcmpleb %zmm1, %zmm0, %k1
-; CHECK-NEXT:    vpcmpnltb %zmm3, %zmm2, %k1 {%k1}
+; CHECK-NEXT:    vpcmpnltb %zmm3, %zmm2, %k1
+; CHECK-NEXT:    vpcmpleb %zmm1, %zmm0, %k1 {%k1}
 ; CHECK-NEXT:    vpblendmb %zmm0, %zmm2, %zmm0 {%k1}
 ; CHECK-NEXT:    retq
   %mask1 = icmp sge <64 x i8> %x1, %y1
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
index ee750834907463..a2f4a2046a74b3 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
@@ -96,8 +96,8 @@ define <16 x i16> @test256_8(<16 x i16> %x, <16 x i16> %x1, ptr %y.ptr) nounwind
 define <16 x i16> @test256_9(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1, <16 x i16> %y1) nounwind {
 ; CHECK-LABEL: test256_9:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpcmpeqw %ymm1, %ymm0, %k1
-; CHECK-NEXT:    vpcmpeqw %ymm3, %ymm2, %k1 {%k1}
+; CHECK-NEXT:    vpcmpeqw %ymm3, %ymm2, %k1
+; CHECK-NEXT:    vpcmpeqw %ymm1, %ymm0, %k1 {%k1}
 ; CHECK-NEXT:    vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
   %mask1 = icmp eq <16 x i16> %x1, %y1
@@ -110,8 +110,8 @@ define <16 x i16> @test256_9(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1, <16 x
 define <32 x i8> @test256_10(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1, <32 x i8> %y1) nounwind {
 ; CHECK-LABEL: test256_10:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpcmpleb %ymm1, %ymm0, %k1
-; CHECK-NEXT:    vpcmpnltb %ymm3, %ymm2, %k1 {%k1}
+; CHECK-NEXT:    vpcmpnltb %ymm3, %ymm2, %k1
+; CHECK-NEXT:    vpcmpleb %ymm1, %ymm0, %k1 {%k1}
 ; CHECK-NEXT:    vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
   %mask1 = icmp sge <32 x i8> %x1, %y1
@@ -246,8 +246,8 @@ define <8 x i16> @test128_8(<8 x i16> %x, <8 x i16> %x1, ptr %y.ptr) nounwind {
 define <8 x i16> @test128_9(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1, <8 x i16> %y1) nounwind {
 ; CHECK-LABEL: test128_9:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpcmpeqw %xmm1, %xmm0, %k1
-; CHECK-NEXT:    vpcmpeqw %xmm3, %xmm2, %k1 {%k1}
+; CHECK-NEXT:    vpcmpeqw %xmm3, %xmm2, %k1
+; CHECK-NEXT:    vpcmpeqw %xmm1, %xmm0, %k1 {%k1}
 ; CHECK-NEXT:    vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
   %mask1 = icmp eq <8 x i16> %x1, %y1
@@ -260,8 +260,8 @@ define <8 x i16> @test128_9(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1, <8 x i16>
 define <16 x i8> @test128_10(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1, <16 x i8> %y1) nounwind {
 ; CHECK-LABEL: test128_10:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpcmpleb %xmm1, %xmm0, %k1
-; CHECK-NEXT:    vpcmpnltb %xmm3, %xmm2, %k1 {%k1}
+; CHECK-NEXT:    vpcmpnltb %xmm3, %xmm2, %k1
+; CHECK-NEXT:    vpcmpleb %xmm1, %xmm0, %k1 {%k1}
 ; CHECK-NEXT:    vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
 ; CHECK-NEXT:    retq
   %mask1 = icmp sge <16 x i8> %x1, %y1
diff --git a/llvm/test/CodeGen/X86/avx512vl-logic.ll b/llvm/test/CodeGen/X86/avx512vl-logic.ll
index 58621967e2aca6..e345be8f3fca00 100644
--- a/llvm/test/CodeGen/X86/avx512vl-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-logic.ll
@@ -1079,9 +1079,9 @@ define <4 x i64> @ternlog_xor_and_mask_ymm(<4 x i64> %x, <4 x i64> %y) {
 define <4 x i32> @ternlog_maskz_or_and_mask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z, <4 x i32> %mask) {
 ; CHECK-LABEL: ternlog_maskz_or_and_mask:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm2
-; CHECK-NEXT:    vpsrad $31, %xmm3, %xmm0
-; CHECK-NEXT:    vpternlogd $224, %xmm1, %xmm2, %xmm0
+; CHECK-NEXT:    vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; CHECK-NEXT:    vpsrad $31, %xmm3, %xmm1
+; CHECK-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %m = icmp slt <4 x i32> %mask, zeroinitializer
   %a = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
@@ -1093,9 +1093,9 @@ define <4 x i32> @ternlog_maskz_or_and_mask(<4 x i32> %x, <4 x i32> %y, <4 x i32
 define <8 x i32> @ternlog_maskz_or_and_mask_ymm(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask) {
 ; CHECK-LABEL: ternlog_maskz_or_and_mask_ymm:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm3
-; CHECK-NEXT:    vpsrad $31, %ymm2, %ymm0
-; CHECK-NEXT:    vpternlogd $224, %ymm1, %ymm3, %ymm0
+; CHECK-NEXT:    vpternlogd $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; CHECK-NEXT:    vpsrad $31, %ymm2, %ymm1
+; CHECK-NEXT:    vpand %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
   %m = icmp slt <8 x i32> %mask, zeroinitializer
   %a = and <8 x i32> %x, <i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216, i32 -16777216>
@@ -1107,9 +1107,9 @@ define <8 x i32> @ternlog_maskz_or_and_mask_ymm(<8 x i32> %x, <8 x i32> %y, <8 x
 define <2 x i64> @ternlog_maskz_xor_and_mask(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) {
 ; CHECK-LABEL: ternlog_maskz_xor_and_mask:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm3
-; CHECK-NEXT:    vpsraq $63, %xmm2, %xmm0
-; CHECK-NEXT:    vpternlogq $96, %xmm1, %xmm3, %xmm0
+; CHECK-NEXT:    vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm1, %xmm0
+; CHECK-NEXT:    vpsraq $63, %xmm2, %xmm1
+; CHECK-NEXT:    vpand %xmm0, %xmm1, %xmm0
 ; CHECK-NEXT:    retq
   %m = icmp slt <2 x i64> %mask, zeroinitializer
   %a = and <2 x i64> %x, <i64 1099511627775, i64 1099511627775>
@@ -1121,9 +1121,9 @@ define <2 x i64> @ternlog_maskz_xor_and_mask(<2 x i64> %x, <2 x i64> %y, <2 x i6
 define <4 x i64> @ternlog_maskz_xor_and_mask_ymm(<4 x i64> %x, <4 x i64> %y, <4 x i64> %mask) {
 ; CHECK-LABEL: ternlog_maskz_xor_and_mask_ymm:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm3
-; CHECK-NEXT:    vpsraq $63, %ymm2, %ymm0
-; CHECK-NEXT:    vpternlogq $96, %ymm1, %ymm3, %ymm0
+; CHECK-NEXT:    vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm1, %ymm0
+; CHECK-NEXT:    vpsraq $63, %ymm2, %ymm1
+; CHECK-NEXT:    vpand %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT:    retq
   %m = icmp slt <4 x i64> %mask, zeroinitializer
   %a = and <4 x i64> %x, <i64 72057594037927935, i64 72057594037927935, i64 72057594037927935, i64 72057594037927935>
diff --git a/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll
index 5b09e45b6fcf17..12074c292512bb 100644
--- a/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll
@@ -264,8 +264,8 @@ define <8 x i32> @test256_8b(<8 x i32> %x, <8 x i32> %x1, ptr %y.ptr) nounwind {
 define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32> %y1) nounwind {
 ; VLX-LABEL: test256_9:
 ; VLX:       # %bb.0:
-; VLX-NEXT:    vpcmpeqd %ymm1, %ymm0, %k1
-; VLX-NEXT:    vpcmpeqd %ymm3, %ymm2, %k1 {%k1}
+; VLX-NEXT:    vpcmpeqd %ymm3, %ymm2, %k1
+; VLX-NEXT:    vpcmpeqd %ymm1, %ymm0, %k1 {%k1}
 ; VLX-NEXT:    vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
 ; VLX-NEXT:    retq
 ;
@@ -275,8 +275,8 @@ define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32>
 ; NoVLX-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
 ; NoVLX-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
 ; NoVLX-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
-; NoVLX-NEXT:    vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
+; NoVLX-NEXT:    vpcmpeqd %zmm3, %zmm2, %k1
+; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1 {%k1}
 ; NoVLX-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; NoVLX-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; NoVLX-NEXT:    retq
@@ -290,8 +290,8 @@ define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32>
 define <4 x i64> @test256_10(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) nounwind {
 ; VLX-LABEL: test256_10:
 ; VLX:       # %bb.0:
-; VLX-NEXT:    vpcmpleq %ymm1, %ymm0, %k1
-; VLX-NEXT:    vpcmpnltq %ymm3, %ymm2, %k1 {%k1}
+; VLX-NEXT:    vpcmpnltq %ymm3, %ymm2, %k1
+; VLX-NEXT:    vpcmpleq %ymm1, %ymm0, %k1 {%k1}
 ; VLX-NEXT:    vpblendmq %ymm0, %ymm2, %ymm0 {%k1}
 ; VLX-NEXT:    retq
 ;
@@ -301,8 +301,8 @@ define <4 x i64> @test256_10(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64
 ; NoVLX-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
 ; NoVLX-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
 ; NoVLX-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; NoVLX-NEXT:    vpcmpleq %zmm1, %zmm0, %k1
-; NoVLX-NEXT:    vpcmpnltq %zmm3, %zmm2, %k1 {%k1}
+; NoVLX-NEXT:    vpcmpnltq %zmm3, %zmm2, %k1
+; NoVLX-NEXT:    vpcmpleq %zmm1, %zmm0, %k1 {%k1}
 ; NoVLX-NEXT:    vpblendmq %zmm0, %zmm2, %zmm0 {%k1}
 ; NoVLX-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; NoVLX-NEXT:    retq
@@ -326,9 +326,9 @@ define <4 x i64> @test256_11(<4 x i64> %x, ptr %y.ptr, <4 x i64> %x1, <4 x i64>
 ; NoVLX-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
 ; NoVLX-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
 ; NoVLX-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; NoVLX-NEXT:    vmovdqu (%rdi), %ymm3
-; NoVLX-NEXT:    vpcmpgtq %zmm3, %zmm0, %k1
-; NoVLX-NEXT:    vpcmpgtq %zmm2, %zmm1, %k1 {%k1}
+; NoVLX-NEXT:    vpcmpgtq %zmm2, %zmm1, %k1
+; NoVLX-NEXT:    vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT:    vpcmpgtq %zmm2, %zmm0, %k1 {%k1}
 ; NoVLX-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
 ; NoVLX-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; NoVLX-NEXT:    retq
@@ -353,9 +353,9 @@ define <8 x i32> @test256_12(<8 x i32> %x, ptr %y.ptr, <8 x i32> %x1, <8 x i32>
 ; NoVLX-NEXT:    # kill: def $ymm2 killed $ymm2 def $zmm2
 ; NoVLX-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
 ; NoVLX-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
-; NoVLX-NEXT:    vmovdqu (%rdi), %ymm3
-; NoVLX-NEXT:    vpcmpleud %zmm3, %zmm0, %k1
-; NoVLX-NEXT:    vpcmpnltd %zmm2, %zmm1, %k1 {%k1}
+; NoVLX-NEXT:    vpcmpnltd %zmm2, %zmm1, %k1
+; NoVLX-NEXT:    vmovdqu (%rdi), %ymm2
+; NoVLX-NEXT:    vpcmpleud %zmm2, %zmm0, %k1 {%k1}
 ; NoVLX-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; NoVLX-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; NoVLX-NEXT:    retq
@@ -819,8 +819,8 @@ define <4 x i32> @test128_8b(<4 x i32> %x, <4 x i32> %x1, ptr %y.ptr) nounwind {
 define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32> %y1) nounwind {
 ; VLX-LABEL: test128_9:
 ; VLX:       # %bb.0:
-; VLX-NEXT:    vpcmpeqd %xmm1, %xmm0, %k1
-; VLX-NEXT:    vpcmpeqd %xmm3, %xmm2, %k1 {%k1}
+; VLX-NEXT:    vpcmpeqd %xmm3, %xmm2, %k1
+; VLX-NEXT:    vpcmpeqd %xmm1, %xmm0, %k1 {%k1}
 ; VLX-NEXT:    vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
 ; VLX-NEXT:    retq
 ;
@@ -830,8 +830,8 @@ define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32>
 ; NoVLX-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
 ; NoVLX-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
 ; NoVLX-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
-; NoVLX-NEXT:    vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
+; NoVLX-NEXT:    vpcmpeqd %zmm3, %zmm2, %k1
+; NoVLX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1 {%k1}
 ; NoVLX-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; NoVLX-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; NoVLX-NEXT:    retq
@@ -845,8 +845,8 @@ define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32>
 define <2 x i64> @test128_10(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) nounwind {
 ; VLX-LABEL: test128_10:
 ; VLX:       # %bb.0:
-; VLX-NEXT:    vpcmpleq %xmm1, %xmm0, %k1
-; VLX-NEXT:    vpcmpnltq %xmm3, %xmm2, %k1 {%k1}
+; VLX-NEXT:    vpcmpnltq %xmm3, %xmm2, %k1
+; VLX-NEXT:    vpcmpleq %xmm1, %xmm0, %k1 {%k1}
 ; VLX-NEXT:    vpblendmq %xmm0, %xmm2, %xmm0 {%k1}
 ; VLX-NEXT:    retq
 ;
@@ -856,8 +856,8 @@ define <2 x i64> @test128_10(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64
 ; NoVLX-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
 ; NoVLX-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
 ; NoVLX-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; NoVLX-NEXT:    vpcmpleq %zmm1, %zmm0, %k1
-; NoVLX-NEXT:    vpcmpnltq %zmm3, %zmm2, %k1 {%k1}
+; NoVLX-NEXT:    vpcmpnltq %zmm3, %zmm2, %k1
+; NoVLX-NEXT:    vpcmpleq %zmm1, %zmm0, %k1 {%k1}
 ; NoVLX-NEXT:    vpblendmq %zmm0, %zmm2, %zmm0 {%k1}
 ; NoVLX-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; NoVLX-NEXT:    retq
@@ -881,9 +881,9 @@ define <2 x i64> @test128_11(<2 x i64> %x, ptr %y.ptr, <2 x i64> %x1, <2 x i64>
 ; NoVLX-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
 ; NoVLX-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
 ; NoVLX-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; NoVLX-NEXT:    vmovdqu (%rdi), %xmm3
-; NoVLX-NEXT:    vpcmpgtq %zmm3, %zmm0, %k1
-; NoVLX-NEXT:    vpcmpgtq %zmm2, %zmm1, %k1 {%k1}
+; NoVLX-NEXT:    vpcmpgtq %zmm2, %zmm1, %k1
+; NoVLX-NEXT:    vmovdqu (%rdi), %xmm2
+; NoVLX-NEXT:    vpcmpgtq %zmm2, %zmm0, %k1 {%k1}
 ; NoVLX-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
 ; NoVLX-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; NoVLX-NEXT:    retq
@@ -908,9 +908,9 @@ define <4 x i32> @test128_12(<4 x i32> %x, ptr %y.ptr, <4 x i32> %x1, <4 x i32>
 ; NoVLX-NEXT:    # kill: def $xmm2 killed $xmm2 def $zmm2
 ; NoVLX-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
 ; NoVLX-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
-; NoVLX-NEXT:    vmovdqu (%rdi), %xmm3
-; NoVLX-NEXT:    vpcmpleud %zmm3, %zmm0, %k1
-; NoVLX-NEXT:    vpcmpnltd %zmm2, %zmm1, %k1 {%k1}
+; NoVLX-NEXT:    vpcmpnltd %zmm2, %zmm1, %k1
+; NoVLX-NEXT:    vmovdqu (%rdi), %xmm2
+; NoVLX-NEXT:    vpcmpleud %zmm2, %zmm0, %k1 {%k1}
 ; NoVLX-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; NoVLX-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; NoVLX-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
index fa45afbb634c4d..1a0207cba9d806 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
@@ -178,13 +178,13 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    subl $136, %esp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    orl %edi, %eax
-; X86-NEXT:    movl %ebp, %ecx
-; X86-NEXT:    orl %esi, %ecx
+; X86-NEXT:    orl %esi, %eax
+; X86-NEXT:    orl %edx, %ecx
+; X86-NEXT:    movl %edx, %edi
 ; X86-NEXT:    orl %eax, %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    sete %bl
@@ -195,30 +195,27 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    orb %bl, %al
 ; X86-NEXT:    movb %al, (%esp) # 1-byte Spill
-; X86-NEXT:    bsrl %edi, %edx
+; X86-NEXT:    bsrl %esi, %edx
 ; X86-NEXT:    xorl $31, %edx
-; X86-NEXT:    bsrl %esi, %ecx
+; X86-NEXT:    bsrl %edi, %ecx
 ; X86-NEXT:    xorl $31, %ecx
 ; X86-NEXT:    addl $32, %ecx
-; X86-NEXT:    testl %edi, %edi
-; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    testl %esi, %esi
 ; X86-NEXT:    cmovnel %edx, %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    bsrl %eax, %edx
 ; X86-NEXT:    xorl $31, %edx
-; X86-NEXT:    bsrl %ebp, %ebp
-; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    bsrl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl %esi, %ebx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    xorl $31, %ebp
 ; X86-NEXT:    addl $32, %ebp
 ; X86-NEXT:    testl %eax, %eax
 ; X86-NEXT:    cmovnel %edx, %ebp
 ; X86-NEXT:    addl $64, %ebp
-; X86-NEXT:    movl %edi, %edx
-; X86-NEXT:    orl %ebx, %edx
+; X86-NEXT:    orl %ebx, %edi
 ; X86-NEXT:    cmovnel %ecx, %ebp
 ; X86-NEXT:    bsrl %esi, %edx
-; X86-NEXT:    movl %esi, %ebx
 ; X86-NEXT:    xorl $31, %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    bsrl %eax, %ecx
@@ -235,15 +232,16 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    testl %edi, %edi
 ; X86-NEXT:    cmovnel %esi, %edx
 ; X86-NEXT:    addl $64, %edx
-; X86-NEXT:    orl %ebx, %eax
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    cmovnel %ecx, %edx
 ; X86-NEXT:    subl %edx, %ebp
 ; X86-NEXT:    movl $0, %edx
 ; X86-NEXT:    sbbl %edx, %edx
 ; X86-NEXT:    movl $0, %esi
 ; X86-NEXT:    sbbl %esi, %esi
-; X86-NEXT:    movl $0, %edi
-; X86-NEXT:    sbbl %edi, %edi
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    sbbl %ebx, %ebx
 ; X86-NEXT:    movl $127, %ecx
 ; X86-NEXT:    cmpl %ebp, %ecx
 ; X86-NEXT:    movl $0, %ecx
@@ -251,7 +249,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    movl $0, %ecx
 ; X86-NEXT:    sbbl %esi, %ecx
 ; X86-NEXT:    movl $0, %ecx
-; X86-NEXT:    sbbl %edi, %ecx
+; X86-NEXT:    sbbl %ebx, %ecx
 ; X86-NEXT:    setb %cl
 ; X86-NEXT:    orb (%esp), %cl # 1-byte Folded Reload
 ; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -260,18 +258,19 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    orl %esi, %eax
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    orl %edi, %edx
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    orl %ebx, %edx
 ; X86-NEXT:    orl %eax, %edx
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    testb %cl, %cl
-; X86-NEXT:    movl %ebx, %edx
-; X86-NEXT:    movl $0, %edi
-; X86-NEXT:    cmovnel %edi, %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    cmovnel %ebx, %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    cmovnel %edi, %esi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT:    cmovnel %edi, %ebp
+; X86-NEXT:    cmovnel %ebx, %esi
+; X86-NEXT:    movl %edi, %ebp
+; X86-NEXT:    cmovnel %ebx, %ebp
+; X86-NEXT:    movl $0, %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
 ; X86-NEXT:    cmovnel %edi, %ebx
 ; X86-NEXT:    orb %cl, %al
diff --git a/llvm/test/CodeGen/X86/fcmp-logic.ll b/llvm/test/CodeGen/X86/fcmp-logic.ll
index 4953d004e65c57..82c43f0d985be2 100644
--- a/llvm/test/CodeGen/X86/fcmp-logic.ll
+++ b/llvm/test/CodeGen/X86/fcmp-logic.ll
@@ -371,8 +371,8 @@ define i1 @f32cmp3(float %x, float %y, float %z, float %w) {
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    xorps %xmm4, %xmm4
 ; SSE2-NEXT:    xorps %xmm5, %xmm5
-; SSE2-NEXT:    cmpltps %xmm1, %xmm5
-; SSE2-NEXT:    cmpltps %xmm0, %xmm4
+; SSE2-NEXT:    cmpltps %xmm0, %xmm5
+; SSE2-NEXT:    cmpltps %xmm1, %xmm4
 ; SSE2-NEXT:    orps %xmm5, %xmm4
 ; SSE2-NEXT:    movd %xmm4, %ecx
 ; SSE2-NEXT:    ucomiss %xmm2, %xmm3
@@ -383,8 +383,8 @@ define i1 @f32cmp3(float %x, float %y, float %z, float %w) {
 ; AVX1-LABEL: f32cmp3:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vxorps %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vcmpltps %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vcmpltps %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vcmpltps %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT:    vorps %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %ecx
 ; AVX1-NEXT:    vucomiss %xmm2, %xmm3
@@ -397,9 +397,9 @@ define i1 @f32cmp3(float %x, float %y, float %z, float %w) {
 ; AVX512-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
 ; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
 ; AVX512-NEXT:    vxorps %xmm4, %xmm4, %xmm4
-; AVX512-NEXT:    vcmpltps %zmm1, %zmm4, %k0
-; AVX512-NEXT:    vcmpltps %zmm0, %zmm4, %k1
-; AVX512-NEXT:    korw %k0, %k1, %k0
+; AVX512-NEXT:    vcmpltps %zmm0, %zmm4, %k0
+; AVX512-NEXT:    vcmpltps %zmm1, %zmm4, %k1
+; AVX512-NEXT:    korw %k1, %k0, %k0
 ; AVX512-NEXT:    kmovw %k0, %ecx
 ; AVX512-NEXT:    vucomiss %xmm2, %xmm3
 ; AVX512-NEXT:    seta %al
diff --git a/llvm/test/CodeGen/X86/fold-select.ll b/llvm/test/CodeGen/X86/fold-select.ll
index 31afe979a33b35..fff7136219714d 100644
--- a/llvm/test/CodeGen/X86/fold-select.ll
+++ b/llvm/test/CodeGen/X86/fold-select.ll
@@ -59,11 +59,11 @@ define <8 x float> @select_or_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, <8 x f
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllw $15, %xmm2, %xmm2
 ; CHECK-NEXT:    vpmovw2m %xmm2, %k0
-; CHECK-NEXT:    vpsllw $15, %xmm1, %xmm1
-; CHECK-NEXT:    vpmovw2m %xmm1, %k1
 ; CHECK-NEXT:    vpsllw $15, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovw2m %xmm0, %k1
+; CHECK-NEXT:    vpsllw $15, %xmm1, %xmm0
 ; CHECK-NEXT:    vpmovw2m %xmm0, %k2
-; CHECK-NEXT:    kandnb %k1, %k2, %k1
+; CHECK-NEXT:    kandnb %k2, %k1, %k1
 ; CHECK-NEXT:    korb %k1, %k0, %k1
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
 ; CHECK-NEXT:    vmovaps %ymm3, %ymm0 {%k1}
@@ -98,9 +98,9 @@ define <8 x float> @select_or_v8i1_2(i8 %m1, i8 %m2, i8 %m3, <8 x float> %d) {
 define <8 x float> @select_or_v8i1_3(<8 x i16> %m1, <8 x i16> %m2, <8 x i16> %m3, <8 x float> %d) {
 ; CHECK-LABEL: select_or_v8i1_3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpcmpneqw %xmm1, %xmm0, %k1
+; CHECK-NEXT:    vpcmpeqw %xmm2, %xmm0, %k1
 ; CHECK-NEXT:    vpcmpeqw %xmm2, %xmm1, %k0
-; CHECK-NEXT:    vpcmpeqw %xmm2, %xmm0, %k1 {%k1}
+; CHECK-NEXT:    vpcmpneqw %xmm1, %xmm0, %k1 {%k1}
 ; CHECK-NEXT:    korb %k1, %k0, %k1
 ; CHECK-NEXT:    vbroadcastss {{.*#+}} ymm0 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
 ; CHECK-NEXT:    vmovaps %ymm3, %ymm0 {%k1}
diff --git a/llvm/test/CodeGen/X86/pr64589.ll b/llvm/test/CodeGen/X86/pr64589.ll
index 130ef517ae28eb..d93d54f4c31d0b 100644
--- a/llvm/test/CodeGen/X86/pr64589.ll
+++ b/llvm/test/CodeGen/X86/pr64589.ll
@@ -7,8 +7,8 @@
 define i8 @test(ptr %p) {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movzbl (%rdi), %eax
-; CHECK-NEXT:    orb 1(%rdi), %al
+; CHECK-NEXT:    movzbl 1(%rdi), %eax
+; CHECK-NEXT:    orb (%rdi), %al
 ; CHECK-NEXT:    setne %al
 ; CHECK-NEXT:    addb %al, %al
 ; CHECK-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-compare-all_of.ll b/llvm/test/CodeGen/X86/vector-compare-all_of.ll
index ec7dca4285a355..7c616b15880572 100644
--- a/llvm/test/CodeGen/X86/vector-compare-all_of.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-all_of.ll
@@ -1543,34 +1543,49 @@ define i1 @select_v2i8(ptr %s0, ptr %s1) {
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE2-NEXT:    movmskpd %xmm0, %eax
-; SSE2-NEXT:    cmpl $3, %eax
-; SSE2-NEXT:    sete %al
+; SSE2-NEXT:    movmskpd %xmm0, %ecx
+; SSE2-NEXT:    movl %ecx, %eax
+; SSE2-NEXT:    andb $2, %al
+; SSE2-NEXT:    shrb %al
+; SSE2-NEXT:    andb %cl, %al
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: select_v2i8:
 ; SSE42:       # %bb.0:
 ; SSE42-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE42-NEXT:    pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; SSE42-NEXT:    pxor %xmm0, %xmm1
-; SSE42-NEXT:    ptest %xmm1, %xmm1
-; SSE42-NEXT:    sete %al
+; SSE42-NEXT:    pcmpeqq %xmm0, %xmm1
+; SSE42-NEXT:    movmskpd %xmm1, %ecx
+; SSE42-NEXT:    movl %ecx, %eax
+; SSE42-NEXT:    andb $2, %al
+; SSE42-NEXT:    shrb %al
+; SSE42-NEXT:    andb %cl, %al
 ; SSE42-NEXT:    retq
 ;
 ; AVX1OR2-LABEL: select_v2i8:
 ; AVX1OR2:       # %bb.0:
 ; AVX1OR2-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1OR2-NEXT:    vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
-; AVX1OR2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1OR2-NEXT:    vptest %xmm0, %xmm0
-; AVX1OR2-NEXT:    sete %al
+; AVX1OR2-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmovmskpd %xmm0, %ecx
+; AVX1OR2-NEXT:    movl %ecx, %eax
+; AVX1OR2-NEXT:    andb $2, %al
+; AVX1OR2-NEXT:    shrb %al
+; AVX1OR2-NEXT:    andb %cl, %al
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: select_v2i8:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    movzwl (%rdi), %eax
-; AVX512-NEXT:    cmpw (%rsi), %ax
-; AVX512-NEXT:    sete %al
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    movzwl (%rsi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %k0
+; AVX512-NEXT:    kshiftrw $1, %k0, %k1
+; AVX512-NEXT:    kmovd %k1, %ecx
+; AVX512-NEXT:    kmovd %k0, %eax
+; AVX512-NEXT:    andb %cl, %al
+; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    retq
   %v0 = load <2 x i8>, ptr %s0, align 1
   %v1 = load <2 x i8>, ptr %s1, align 1
diff --git a/llvm/test/CodeGen/X86/vector-compare-any_of.ll b/llvm/test/CodeGen/X86/vector-compare-any_of.ll
index 951bcfa8fc1b74..d684b8a53eef16 100644
--- a/llvm/test/CodeGen/X86/vector-compare-any_of.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-any_of.ll
@@ -1426,9 +1426,11 @@ define i1 @select_v2i8(ptr %s0, ptr %s1) {
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm1[0,0,1,1,4,5,6,7]
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE2-NEXT:    movmskpd %xmm0, %eax
-; SSE2-NEXT:    testl %eax, %eax
-; SSE2-NEXT:    setne %al
+; SSE2-NEXT:    movmskpd %xmm0, %ecx
+; SSE2-NEXT:    movl %ecx, %eax
+; SSE2-NEXT:    andb $2, %al
+; SSE2-NEXT:    shrb %al
+; SSE2-NEXT:    orb %cl, %al
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: select_v2i8:
@@ -1436,9 +1438,11 @@ define i1 @select_v2i8(ptr %s0, ptr %s1) {
 ; SSE42-NEXT:    pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE42-NEXT:    pmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; SSE42-NEXT:    pcmpeqq %xmm0, %xmm1
-; SSE42-NEXT:    movmskpd %xmm1, %eax
-; SSE42-NEXT:    testl %eax, %eax
-; SSE42-NEXT:    setne %al
+; SSE42-NEXT:    movmskpd %xmm1, %ecx
+; SSE42-NEXT:    movl %ecx, %eax
+; SSE42-NEXT:    andb $2, %al
+; SSE42-NEXT:    shrb %al
+; SSE42-NEXT:    orb %cl, %al
 ; SSE42-NEXT:    retq
 ;
 ; AVX1OR2-LABEL: select_v2i8:
@@ -1446,8 +1450,11 @@ define i1 @select_v2i8(ptr %s0, ptr %s1) {
 ; AVX1OR2-NEXT:    vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1OR2-NEXT:    vpmovzxbq {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX1OR2-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
-; AVX1OR2-NEXT:    vtestpd %xmm0, %xmm0
-; AVX1OR2-NEXT:    setne %al
+; AVX1OR2-NEXT:    vmovmskpd %xmm0, %ecx
+; AVX1OR2-NEXT:    movl %ecx, %eax
+; AVX1OR2-NEXT:    andb $2, %al
+; AVX1OR2-NEXT:    shrb %al
+; AVX1OR2-NEXT:    orb %cl, %al
 ; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: select_v2i8:
@@ -1457,9 +1464,11 @@ define i1 @select_v2i8(ptr %s0, ptr %s1) {
 ; AVX512-NEXT:    movzwl (%rsi), %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm1
 ; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %k0
+; AVX512-NEXT:    kshiftrw $1, %k0, %k1
+; AVX512-NEXT:    kmovd %k1, %ecx
 ; AVX512-NEXT:    kmovd %k0, %eax
-; AVX512-NEXT:    testb $3, %al
-; AVX512-NEXT:    setne %al
+; AVX512-NEXT:    orb %cl, %al
+; AVX512-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX512-NEXT:    retq
   %v0 = load <2 x i8>, ptr %s0, align 1
   %v1 = load <2 x i8>, ptr %s1, align 1