[llvm] [WIP][DAG] Add legalization handling for ABDS/ABDU (PR #92576)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri May 17 09:51:27 PDT 2024


https://github.com/RKSimon created https://github.com/llvm/llvm-project/pull/92576

Still WIP, but I wanted to give other teams some early visibility.

Always match ABD patterns pre-legalization, and rely on TargetLowering::expandABD to expand unsupported ABDS/ABDU nodes again during legalization.
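
As a concrete illustration (not part of the patch - the function names are made up, the bodies mirror the smaxmin/uabd tests updated below), these are the kinds of IR patterns that now always get canonicalized to ABDS/ABDU:

; Illustrative only: smax(a,b) - smin(a,b) --> abds(a,b)
define <2 x i64> @abds_pattern(<2 x i64> %a, <2 x i64> %b) {
  %max = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %a, <2 x i64> %b)
  %min = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %a, <2 x i64> %b)
  %sub = sub <2 x i64> %max, %min
  ret <2 x i64> %sub
}

; Illustrative only: abs(zext(a) - zext(b)) --> zext(abdu(a,b))
define <2 x i64> @abdu_pattern(<2 x i64> %a, <2 x i64> %b) {
  %a.zext = zext <2 x i64> %a to <2 x i128>
  %b.zext = zext <2 x i64> %b to <2 x i128>
  %sub = sub <2 x i128> %a.zext, %b.zext
  %abs = call <2 x i128> @llvm.abs.v2i128(<2 x i128> %sub, i1 true)
  %trunc = trunc <2 x i128> %abs to <2 x i64>
  ret <2 x i64> %trunc
}

declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i128> @llvm.abs.v2i128(<2 x i128>, i1)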

From 3d1ec6012e3324d10a7c2ec2a4e61a660f679766 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Fri, 17 May 2024 17:47:58 +0100
Subject: [PATCH] [WIP][DAG] Add legalization handling for ABDS/ABDU

Still WIP, but I wanted to give other teams some early visibility.

Always match ABD patterns pre-legalization, and rely on TargetLowering::expandABD to expand unsupported ABDS/ABDU nodes again during legalization.
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |   14 +-
 .../SelectionDAG/LegalizeIntegerTypes.cpp     |    9 +
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h |    1 +
 .../SelectionDAG/LegalizeVectorTypes.cpp      |    6 +
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |   14 +
 llvm/test/CodeGen/AArch64/arm64-csel.ll       |   18 +-
 llvm/test/CodeGen/AArch64/arm64-vabs.ll       |   25 +-
 llvm/test/CodeGen/AArch64/neon-abd.ll         |  101 +-
 llvm/test/CodeGen/AArch64/sve-aba.ll          |   37 +-
 llvm/test/CodeGen/AArch64/sve-abd.ll          |   29 +-
 llvm/test/CodeGen/AMDGPU/sad.ll               |  369 ++++-
 llvm/test/CodeGen/ARM/iabs.ll                 |    5 +-
 llvm/test/CodeGen/ARM/neon_vabs.ll            |   10 +-
 llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll   | 1329 ++++++++---------
 llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll    |   24 +-
 llvm/test/CodeGen/RISCV/rvv/abd.ll            |   20 +-
 llvm/test/CodeGen/X86/abds-vector-128.ll      |  170 +--
 llvm/test/CodeGen/X86/abds.ll                 |   74 +-
 llvm/test/CodeGen/X86/abdu-vector-128.ll      |  157 +-
 llvm/test/CodeGen/X86/abdu.ll                 |   70 +-
 llvm/test/CodeGen/X86/midpoint-int-vec-128.ll |   75 +-
 21 files changed, 1334 insertions(+), 1223 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 2b181cd3ab1db..66740b4064431 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4140,13 +4140,13 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
   }
 
   // smax(a,b) - smin(a,b) --> abds(a,b)
-  if (hasOperation(ISD::ABDS, VT) &&
+  if ((!LegalOperations || hasOperation(ISD::ABDS, VT)) &&
       sd_match(N0, m_SMax(m_Value(A), m_Value(B))) &&
       sd_match(N1, m_SMin(m_Specific(A), m_Specific(B))))
     return DAG.getNode(ISD::ABDS, DL, VT, A, B);
 
   // umax(a,b) - umin(a,b) --> abdu(a,b)
-  if (hasOperation(ISD::ABDU, VT) &&
+  if ((!LegalOperations || hasOperation(ISD::ABDU, VT)) &&
       sd_match(N0, m_UMax(m_Value(A), m_Value(B))) &&
       sd_match(N1, m_UMin(m_Specific(A), m_Specific(B))))
     return DAG.getNode(ISD::ABDU, DL, VT, A, B);
@@ -10915,7 +10915,8 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) {
       (Opc0 != ISD::ZERO_EXTEND && Opc0 != ISD::SIGN_EXTEND &&
        Opc0 != ISD::SIGN_EXTEND_INREG)) {
     // fold (abs (sub nsw x, y)) -> abds(x, y)
-    if (AbsOp1->getFlags().hasNoSignedWrap() && hasOperation(ISD::ABDS, VT) &&
+    if (AbsOp1->getFlags().hasNoSignedWrap() &&
+        (!LegalOperations || hasOperation(ISD::ABDS, VT)) &&
         TLI.preferABDSToABSWithNSW(VT)) {
       SDValue ABD = DAG.getNode(ISD::ABDS, DL, VT, Op0, Op1);
       return DAG.getZExtOrTrunc(ABD, DL, SrcVT);
@@ -10937,7 +10938,8 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) {
   // fold abs(zext(x) - zext(y)) -> zext(abdu(x, y))
   EVT MaxVT = VT0.bitsGT(VT1) ? VT0 : VT1;
   if ((VT0 == MaxVT || Op0->hasOneUse()) &&
-      (VT1 == MaxVT || Op1->hasOneUse()) && hasOperation(ABDOpcode, MaxVT)) {
+      (VT1 == MaxVT || Op1->hasOneUse()) &&
+      (!LegalOperations || hasOperation(ABDOpcode, MaxVT))) {
     SDValue ABD = DAG.getNode(ABDOpcode, DL, MaxVT,
                               DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op0),
                               DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op1));
@@ -10947,7 +10949,7 @@ SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) {
 
   // fold abs(sext(x) - sext(y)) -> abds(sext(x), sext(y))
   // fold abs(zext(x) - zext(y)) -> abdu(zext(x), zext(y))
-  if (hasOperation(ABDOpcode, VT)) {
+  if (!LegalOperations || hasOperation(ABDOpcode, VT)) {
     SDValue ABD = DAG.getNode(ABDOpcode, DL, VT, Op0, Op1);
     return DAG.getZExtOrTrunc(ABD, DL, SrcVT);
   }
@@ -12316,7 +12318,7 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
         N1.getOperand(1) == N2.getOperand(0)) {
       bool IsSigned = isSignedIntSetCC(CC);
       unsigned ABDOpc = IsSigned ? ISD::ABDS : ISD::ABDU;
-      if (hasOperation(ABDOpc, VT)) {
+      if (!LegalOperations || hasOperation(ABDOpc, VT)) {
         switch (CC) {
         case ISD::SETGT:
         case ISD::SETGE:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 98f64947bcabc..a42ff1d8a5018 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -188,6 +188,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::VP_SUB:
   case ISD::VP_MUL:      Res = PromoteIntRes_SimpleIntBinOp(N); break;
 
+  case ISD::ABDS:
   case ISD::VP_SMIN:
   case ISD::VP_SMAX:
   case ISD::SDIV:
@@ -195,6 +196,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::VP_SDIV:
   case ISD::VP_SREM:     Res = PromoteIntRes_SExtIntBinOp(N); break;
 
+  case ISD::ABDU:
   case ISD::VP_UMIN:
   case ISD::VP_UMAX:
   case ISD::UDIV:
@@ -2663,6 +2665,8 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
   case ISD::PARITY:      ExpandIntRes_PARITY(N, Lo, Hi); break;
   case ISD::Constant:    ExpandIntRes_Constant(N, Lo, Hi); break;
   case ISD::ABS:         ExpandIntRes_ABS(N, Lo, Hi); break;
+  case ISD::ABDS:
+  case ISD::ABDU:        ExpandIntRes_ABD(N, Lo, Hi); break;
   case ISD::CTLZ_ZERO_UNDEF:
   case ISD::CTLZ:        ExpandIntRes_CTLZ(N, Lo, Hi); break;
   case ISD::CTPOP:       ExpandIntRes_CTPOP(N, Lo, Hi); break;
@@ -3709,6 +3713,11 @@ void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N,
   Hi = DAG.getConstant(0, dl, NVT);
 }
 
+void DAGTypeLegalizer::ExpandIntRes_ABD(SDNode *N, SDValue &Lo, SDValue &Hi) {
+  SDValue Result = TLI.expandABD(N, DAG);
+  SplitInteger(Result, Lo, Hi);
+}
+
 void DAGTypeLegalizer::ExpandIntRes_CTPOP(SDNode *N,
                                           SDValue &Lo, SDValue &Hi) {
   SDLoc dl(N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index d925089d5689f..86277898f1644 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -424,6 +424,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   void ExpandIntRes_AssertZext        (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_Constant          (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_ABS               (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_ABD               (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_CTLZ              (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_CTPOP             (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandIntRes_CTTZ              (SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index cd858003cf03b..c649b9fa9944c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -136,6 +136,8 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::FMINIMUM:
   case ISD::FMAXIMUM:
   case ISD::FLDEXP:
+  case ISD::ABDS:
+  case ISD::ABDU:
   case ISD::SMIN:
   case ISD::SMAX:
   case ISD::UMIN:
@@ -1171,6 +1173,8 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::MUL: case ISD::VP_MUL:
   case ISD::MULHS:
   case ISD::MULHU:
+  case ISD::ABDS:
+  case ISD::ABDU:
   case ISD::FADD: case ISD::VP_FADD:
   case ISD::FSUB: case ISD::VP_FSUB:
   case ISD::FMUL: case ISD::VP_FMUL:
@@ -4231,6 +4235,8 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::MUL: case ISD::VP_MUL:
   case ISD::MULHS:
   case ISD::MULHU:
+  case ISD::ABDS:
+  case ISD::ABDU:
   case ISD::OR: case ISD::VP_OR:
   case ISD::SUB: case ISD::VP_SUB:
   case ISD::XOR: case ISD::VP_XOR:
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 3ec6b9b795079..c99d32ac91aac 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -9194,6 +9194,20 @@ SDValue TargetLowering::expandABD(SDNode *N, SelectionDAG &DAG) const {
                        DAG.getNode(ISD::USUBSAT, dl, VT, LHS, RHS),
                        DAG.getNode(ISD::USUBSAT, dl, VT, RHS, LHS));
 
+  // If the subtract doesn't overflow then just use abs(sub())
+  // NOTE: don't use frozen operands for value tracking.
+  if (DAG.willNotOverflowSub(IsSigned, N->getOperand(0), N->getOperand(1)))
+    return DAG.getNode(ISD::ABS, dl, VT,
+                       DAG.getNode(ISD::SUB, dl, VT, LHS, RHS));
+  if (DAG.willNotOverflowSub(IsSigned, N->getOperand(1), N->getOperand(0)))
+    return DAG.getNode(ISD::ABS, dl, VT,
+                       DAG.getNode(ISD::SUB, dl, VT, RHS, LHS));
+
+  // FIXME: Should really try to split the vector in case it's legal on a
+  // subvector.
+  if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT))
+    return DAG.UnrollVectorOp(N);
+
   // abds(lhs, rhs) -> select(sgt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
   // abdu(lhs, rhs) -> select(ugt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs))
   EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
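
For reviewers: the generic select-based fallback at the tail of expandABD, plus the new abs(sub()) early-out added above, are equivalent to the following IR. This is an illustrative sketch with made-up function names, not code from this patch:

; Illustrative only: abdu(a,b) -> select(ugt(a,b), sub(a,b), sub(b,a))
define i64 @abdu_select_expansion(i64 %a, i64 %b) {
  %cmp = icmp ugt i64 %a, %b
  %ab = sub i64 %a, %b
  %ba = sub i64 %b, %a
  %res = select i1 %cmp, i64 %ab, i64 %ba
  ret i64 %res
}

; Illustrative only: if the subtract provably can't overflow, the cheaper
; abs(sub(a,b)) form is used instead of the select.
define i64 @abds_nooverflow_expansion(i64 %a, i64 %b) {
  %sub = sub nsw i64 %a, %b
  %abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
  ret i64 %abs
}

declare i64 @llvm.abs.i64(i64, i1)
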
diff --git a/llvm/test/CodeGen/AArch64/arm64-csel.ll b/llvm/test/CodeGen/AArch64/arm64-csel.ll
index 1cf99d1b31a8b..0c31f1d1d2d6b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-csel.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-csel.ll
@@ -64,8 +64,9 @@ define i32 @foo4(i32 %a) nounwind ssp {
 define i32 @foo5(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: foo5:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    subs w8, w0, w1
-; CHECK-NEXT:    cneg w0, w8, mi
+; CHECK-NEXT:    sub w8, w1, w0
+; CHECK-NEXT:    subs w9, w0, w1
+; CHECK-NEXT:    csel w0, w9, w8, gt
 ; CHECK-NEXT:    ret
 entry:
   %sub = sub nsw i32 %a, %b
@@ -97,12 +98,13 @@ l.else:
 define i32 @foo7(i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: foo7:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    subs w8, w0, w1
-; CHECK-NEXT:    cneg w9, w8, mi
-; CHECK-NEXT:    cmn w8, #1
-; CHECK-NEXT:    csel w10, w9, w0, lt
-; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w10, w9, ge
+; CHECK-NEXT:    sub w8, w1, w0
+; CHECK-NEXT:    subs w9, w0, w1
+; CHECK-NEXT:    csel w8, w9, w8, gt
+; CHECK-NEXT:    cmn w9, #1
+; CHECK-NEXT:    csel w10, w8, w0, lt
+; CHECK-NEXT:    cmp w9, #0
+; CHECK-NEXT:    csel w0, w10, w8, ge
 ; CHECK-NEXT:    ret
 entry:
   %sub = sub nsw i32 %a, %b
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index f7d31a214563b..ba3de47c3e7c3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1799,29 +1799,16 @@ define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov.d x8, v0[1]
 ; CHECK-NEXT:    mov.d x9, v1[1]
+; CHECK-NEXT:    mov x1, xzr
 ; CHECK-NEXT:    fmov x10, d0
 ; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    asr x12, x10, #63
-; CHECK-NEXT:    asr x13, x11, #63
+; CHECK-NEXT:    mov x3, xzr
+; CHECK-NEXT:    sub x12, x11, x10
 ; CHECK-NEXT:    subs x10, x10, x11
-; CHECK-NEXT:    asr x11, x8, #63
-; CHECK-NEXT:    asr x14, x9, #63
-; CHECK-NEXT:    sbc x12, x12, x13
+; CHECK-NEXT:    csel x0, x10, x12, gt
+; CHECK-NEXT:    sub x10, x9, x8
 ; CHECK-NEXT:    subs x8, x8, x9
-; CHECK-NEXT:    sbc x9, x11, x14
-; CHECK-NEXT:    asr x13, x12, #63
-; CHECK-NEXT:    asr x11, x9, #63
-; CHECK-NEXT:    eor x10, x10, x13
-; CHECK-NEXT:    eor x8, x8, x11
-; CHECK-NEXT:    eor x9, x9, x11
-; CHECK-NEXT:    subs x2, x8, x11
-; CHECK-NEXT:    eor x8, x12, x13
-; CHECK-NEXT:    sbc x3, x9, x11
-; CHECK-NEXT:    subs x9, x10, x13
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    sbc x1, x8, x13
-; CHECK-NEXT:    mov.d v0[1], x1
-; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    csel x2, x8, x10, gt
 ; CHECK-NEXT:    ret
   %aext = sext <2 x i64> %a to <2 x i128>
   %bext = sext <2 x i64> %b to <2 x i128>
diff --git a/llvm/test/CodeGen/AArch64/neon-abd.ll b/llvm/test/CodeGen/AArch64/neon-abd.ll
index 901cb8adc23f0..6081a4323376b 100644
--- a/llvm/test/CodeGen/AArch64/neon-abd.ll
+++ b/llvm/test/CodeGen/AArch64/neon-abd.ll
@@ -49,11 +49,12 @@ define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) #0 {
 define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) #0 {
 ; CHECK-LABEL: sabd_4h_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    shl v0.4h, v0.4h, #8
 ; CHECK-NEXT:    shl v1.4h, v1.4h, #8
-; CHECK-NEXT:    sshr v0.4h, v0.4h, #8
+; CHECK-NEXT:    shl v0.4h, v0.4h, #8
 ; CHECK-NEXT:    sshr v1.4h, v1.4h, #8
+; CHECK-NEXT:    sshr v0.4h, v0.4h, #8
 ; CHECK-NEXT:    sabd v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    bic v0.4h, #255, lsl #8
 ; CHECK-NEXT:    ret
   %a.sext = sext <4 x i8> %a to <4 x i16>
   %b.sext = sext <4 x i8> %b to <4 x i16>
@@ -103,11 +104,13 @@ define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) #0 {
 define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) #0 {
 ; CHECK-LABEL: sabd_2s_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    shl v0.2s, v0.2s, #16
 ; CHECK-NEXT:    shl v1.2s, v1.2s, #16
-; CHECK-NEXT:    sshr v0.2s, v0.2s, #16
+; CHECK-NEXT:    shl v0.2s, v0.2s, #16
+; CHECK-NEXT:    movi d2, #0x00ffff0000ffff
 ; CHECK-NEXT:    sshr v1.2s, v1.2s, #16
+; CHECK-NEXT:    sshr v0.2s, v0.2s, #16
 ; CHECK-NEXT:    sabd v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    and v0.8b, v0.8b, v2.8b
 ; CHECK-NEXT:    ret
   %a.sext = sext <2 x i16> %a to <2 x i32>
   %b.sext = sext <2 x i16> %b to <2 x i32>
@@ -146,25 +149,16 @@ define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, v0.d[1]
 ; CHECK-NEXT:    mov x9, v1.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x12, d1
-; CHECK-NEXT:    asr x14, x10, #63
-; CHECK-NEXT:    asr x11, x8, #63
-; CHECK-NEXT:    asr x13, x9, #63
-; CHECK-NEXT:    asr x15, x12, #63
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    sub x10, x9, x8
 ; CHECK-NEXT:    subs x8, x8, x9
-; CHECK-NEXT:    sbc x9, x11, x13
-; CHECK-NEXT:    subs x10, x10, x12
-; CHECK-NEXT:    sbc x11, x14, x15
-; CHECK-NEXT:    asr x9, x9, #63
-; CHECK-NEXT:    asr x11, x11, #63
-; CHECK-NEXT:    eor x8, x8, x9
-; CHECK-NEXT:    eor x10, x10, x11
-; CHECK-NEXT:    sub x8, x8, x9
-; CHECK-NEXT:    sub x10, x10, x11
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    csel x8, x8, x10, gt
+; CHECK-NEXT:    sub x10, x11, x9
+; CHECK-NEXT:    subs x9, x9, x11
+; CHECK-NEXT:    csel x9, x9, x10, gt
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    mov v0.d[1], x8
 ; CHECK-NEXT:    ret
   %a.sext = sext <2 x i64> %a to <2 x i128>
   %b.sext = sext <2 x i64> %b to <2 x i128>
@@ -232,8 +226,8 @@ define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) #0 {
 define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) #0 {
 ; CHECK-LABEL: uabd_4h_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bic v0.4h, #255, lsl #8
 ; CHECK-NEXT:    bic v1.4h, #255, lsl #8
+; CHECK-NEXT:    bic v0.4h, #255, lsl #8
 ; CHECK-NEXT:    uabd v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
   %a.zext = zext <4 x i8> %a to <4 x i16>
@@ -285,8 +279,8 @@ define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) #0 {
 ; CHECK-LABEL: uabd_2s_promoted_ops:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi d2, #0x00ffff0000ffff
-; CHECK-NEXT:    and v0.8b, v0.8b, v2.8b
 ; CHECK-NEXT:    and v1.8b, v1.8b, v2.8b
+; CHECK-NEXT:    and v0.8b, v0.8b, v2.8b
 ; CHECK-NEXT:    uabd v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
   %a.zext = zext <2 x i16> %a to <2 x i32>
@@ -324,23 +318,9 @@ define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) #0 {
 define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) #0 {
 ; CHECK-LABEL: uabd_2d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, v0.d[1]
-; CHECK-NEXT:    mov x9, v1.d[1]
-; CHECK-NEXT:    fmov x10, d0
-; CHECK-NEXT:    fmov x11, d1
-; CHECK-NEXT:    subs x8, x8, x9
-; CHECK-NEXT:    ngc x9, xzr
-; CHECK-NEXT:    subs x10, x10, x11
-; CHECK-NEXT:    ngc x11, xzr
-; CHECK-NEXT:    asr x9, x9, #63
-; CHECK-NEXT:    asr x11, x11, #63
-; CHECK-NEXT:    eor x8, x8, x9
-; CHECK-NEXT:    eor x10, x10, x11
-; CHECK-NEXT:    sub x8, x8, x9
-; CHECK-NEXT:    sub x10, x10, x11
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    fmov d0, x10
-; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    uqsub v2.2d, v1.2d, v0.2d
+; CHECK-NEXT:    uqsub v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ret
   %a.zext = zext <2 x i64> %a to <2 x i128>
   %b.zext = zext <2 x i64> %b to <2 x i128>
@@ -439,8 +419,18 @@ define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) #0 {
 define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) #0 {
 ; CHECK-LABEL: sabd_v2i64_nsw:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub v0.2d, v0.2d, v1.2d
-; CHECK-NEXT:    abs v0.2d, v0.2d
+; CHECK-NEXT:    mov x8, v0.d[1]
+; CHECK-NEXT:    mov x9, v1.d[1]
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    sub x10, x9, x8
+; CHECK-NEXT:    subs x8, x8, x9
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    csel x8, x8, x10, gt
+; CHECK-NEXT:    sub x10, x11, x9
+; CHECK-NEXT:    subs x9, x9, x11
+; CHECK-NEXT:    csel x9, x9, x10, gt
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    mov v0.d[1], x8
 ; CHECK-NEXT:    ret
   %sub = sub nsw <2 x i64> %a, %b
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
@@ -483,11 +473,18 @@ define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
 define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
 ; CHECK-LABEL: smaxmin_v2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmgt v2.2d, v0.2d, v1.2d
-; CHECK-NEXT:    cmgt v3.2d, v1.2d, v0.2d
-; CHECK-NEXT:    bsl v2.16b, v0.16b, v1.16b
-; CHECK-NEXT:    bif v0.16b, v1.16b, v3.16b
-; CHECK-NEXT:    sub v0.2d, v2.2d, v0.2d
+; CHECK-NEXT:    mov x8, v0.d[1]
+; CHECK-NEXT:    mov x9, v1.d[1]
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    sub x10, x9, x8
+; CHECK-NEXT:    subs x8, x8, x9
+; CHECK-NEXT:    fmov x9, d0
+; CHECK-NEXT:    csel x8, x8, x10, gt
+; CHECK-NEXT:    sub x10, x11, x9
+; CHECK-NEXT:    subs x9, x9, x11
+; CHECK-NEXT:    csel x9, x9, x10, gt
+; CHECK-NEXT:    fmov d0, x9
+; CHECK-NEXT:    mov v0.d[1], x8
 ; CHECK-NEXT:    ret
   %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
   %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
@@ -531,11 +528,9 @@ define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
 define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
 ; CHECK-LABEL: umaxmin_v2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmhi v2.2d, v0.2d, v1.2d
-; CHECK-NEXT:    cmhi v3.2d, v1.2d, v0.2d
-; CHECK-NEXT:    bsl v2.16b, v0.16b, v1.16b
-; CHECK-NEXT:    bif v0.16b, v1.16b, v3.16b
-; CHECK-NEXT:    sub v0.2d, v2.2d, v0.2d
+; CHECK-NEXT:    uqsub v2.2d, v1.2d, v0.2d
+; CHECK-NEXT:    uqsub v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
 ; CHECK-NEXT:    ret
   %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
   %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
diff --git a/llvm/test/CodeGen/AArch64/sve-aba.ll b/llvm/test/CodeGen/AArch64/sve-aba.ll
index 6859f7d017044..b084fbd8c6a92 100644
--- a/llvm/test/CodeGen/AArch64/sve-aba.ll
+++ b/llvm/test/CodeGen/AArch64/sve-aba.ll
@@ -24,9 +24,10 @@ define <vscale x 16 x i8> @saba_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b,
 define <vscale x 16 x i8> @saba_b_promoted_ops(<vscale x 16 x i8> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) #0 {
 ; CHECK-LABEL: saba_b_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z2.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    saba z0.b, z1.b, z2.b
+; CHECK-NEXT:    ptrue p2.b
+; CHECK-NEXT:    mov z1.b, #1 // =0x1
+; CHECK-NEXT:    eor p0.b, p2/z, p1.b, p0.b
+; CHECK-NEXT:    add z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT:    ret
   %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
   %c.sext = sext <vscale x 16 x i1> %c to <vscale x 16 x i8>
@@ -75,9 +76,11 @@ define <vscale x 8 x i16> @saba_h_promoted_ops(<vscale x 8 x i16> %a, <vscale x
 ; CHECK-LABEL: saba_h_promoted_ops:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEXT:    sxtb z2.h, p0/m, z2.h
-; CHECK-NEXT:    saba z0.h, z1.h, z2.h
+; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
+; CHECK-NEXT:    sabd z1.h, p0/m, z1.h, z2.h
+; CHECK-NEXT:    and z1.h, z1.h, #0xff
+; CHECK-NEXT:    add z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
   %c.sext = sext <vscale x 8 x i8> %c to <vscale x 8 x i16>
@@ -126,9 +129,11 @@ define <vscale x 4 x i32> @saba_s_promoted_ops(<vscale x 4 x i32> %a, <vscale x
 ; CHECK-LABEL: saba_s_promoted_ops:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
 ; CHECK-NEXT:    sxth z2.s, p0/m, z2.s
-; CHECK-NEXT:    saba z0.s, z1.s, z2.s
+; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
+; CHECK-NEXT:    sabd z1.s, p0/m, z1.s, z2.s
+; CHECK-NEXT:    and z1.s, z1.s, #0xffff
+; CHECK-NEXT:    add z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
   %c.sext = sext <vscale x 4 x i16> %c to <vscale x 4 x i32>
@@ -177,9 +182,10 @@ define <vscale x 2 x i64> @saba_d_promoted_ops(<vscale x 2 x i64> %a, <vscale x
 ; CHECK-LABEL: saba_d_promoted_ops:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxtw z2.d, p0/m, z2.d
-; CHECK-NEXT:    saba z0.d, z1.d, z2.d
+; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT:    sabd z1.d, p0/m, z1.d, z2.d
+; CHECK-NEXT:    adr z0.d, [z0.d, z1.d, uxtw]
 ; CHECK-NEXT:    ret
   %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
   %c.sext = sext <vscale x 2 x i32> %c to <vscale x 2 x i64>
@@ -231,9 +237,10 @@ define <vscale x 16 x i8> @uaba_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b,
 define <vscale x 16 x i8> @uaba_b_promoted_ops(<vscale x 16 x i8> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) #0 {
 ; CHECK-LABEL: uaba_b_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z1.b, p0/z, #1 // =0x1
-; CHECK-NEXT:    mov z2.b, p1/z, #1 // =0x1
-; CHECK-NEXT:    uaba z0.b, z1.b, z2.b
+; CHECK-NEXT:    ptrue p2.b
+; CHECK-NEXT:    mov z1.b, #1 // =0x1
+; CHECK-NEXT:    eor p0.b, p2/z, p1.b, p0.b
+; CHECK-NEXT:    add z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT:    ret
   %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
   %c.zext = zext <vscale x 16 x i1> %c to <vscale x 16 x i8>
@@ -281,8 +288,8 @@ define <vscale x 8 x i16> @uaba_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b,
 define <vscale x 8 x i16> @uaba_h_promoted_ops(<vscale x 8 x i16> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c) #0 {
 ; CHECK-LABEL: uaba_h_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z1.h, z1.h, #0xff
 ; CHECK-NEXT:    and z2.h, z2.h, #0xff
+; CHECK-NEXT:    and z1.h, z1.h, #0xff
 ; CHECK-NEXT:    uaba z0.h, z1.h, z2.h
 ; CHECK-NEXT:    ret
   %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
@@ -331,8 +338,8 @@ define <vscale x 4 x i32> @uaba_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b,
 define <vscale x 4 x i32> @uaba_s_promoted_ops(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c) #0 {
 ; CHECK-LABEL: uaba_s_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z1.s, z1.s, #0xffff
 ; CHECK-NEXT:    and z2.s, z2.s, #0xffff
+; CHECK-NEXT:    and z1.s, z1.s, #0xffff
 ; CHECK-NEXT:    uaba z0.s, z1.s, z2.s
 ; CHECK-NEXT:    ret
   %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
@@ -381,8 +388,8 @@ define <vscale x 2 x i64> @uaba_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b,
 define <vscale x 2 x i64> @uaba_d_promoted_ops(<vscale x 2 x i64> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) #0 {
 ; CHECK-LABEL: uaba_d_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z1.d, z1.d, #0xffffffff
 ; CHECK-NEXT:    and z2.d, z2.d, #0xffffffff
+; CHECK-NEXT:    and z1.d, z1.d, #0xffffffff
 ; CHECK-NEXT:    uaba z0.d, z1.d, z2.d
 ; CHECK-NEXT:    ret
   %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
diff --git a/llvm/test/CodeGen/AArch64/sve-abd.ll b/llvm/test/CodeGen/AArch64/sve-abd.ll
index 7b492229e3d23..361f1601f575a 100644
--- a/llvm/test/CodeGen/AArch64/sve-abd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-abd.ll
@@ -24,10 +24,9 @@ define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
 ; CHECK-LABEL: sabd_b_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    mov z1.b, p1/z, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    sabd z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    ptrue p2.b
+; CHECK-NEXT:    eor p0.b, p2/z, p1.b, p0.b
+; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
 ; CHECK-NEXT:    ret
   %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
   %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
@@ -54,9 +53,10 @@ define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8
 ; CHECK-LABEL: sabd_h_promoted_ops:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
+; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
 ; CHECK-NEXT:    sabd z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    and z0.h, z0.h, #0xff
 ; CHECK-NEXT:    ret
   %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
   %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
@@ -83,9 +83,10 @@ define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x
 ; CHECK-LABEL: sabd_s_promoted_ops:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
+; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
 ; CHECK-NEXT:    sabd z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    and z0.s, z0.s, #0xffff
 ; CHECK-NEXT:    ret
   %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
   %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
@@ -112,9 +113,10 @@ define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
 ; CHECK-LABEL: sabd_d_promoted_ops:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
+; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
 ; CHECK-NEXT:    sabd z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    ret
   %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
   %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
@@ -144,10 +146,9 @@ define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
 ; CHECK-LABEL: uabd_b_promoted_ops:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p2.b
+; CHECK-NEXT:    eor p0.b, p2/z, p1.b, p0.b
 ; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
-; CHECK-NEXT:    mov z1.b, p1/z, #1 // =0x1
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    uabd z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT:    ret
   %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
   %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
@@ -173,8 +174,8 @@ define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
 define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
 ; CHECK-LABEL: uabd_h_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.h, z0.h, #0xff
 ; CHECK-NEXT:    and z1.h, z1.h, #0xff
+; CHECK-NEXT:    and z0.h, z0.h, #0xff
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    uabd z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    ret
@@ -202,8 +203,8 @@ define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
 define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
 ; CHECK-LABEL: uabd_s_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.s, z0.s, #0xffff
 ; CHECK-NEXT:    and z1.s, z1.s, #0xffff
+; CHECK-NEXT:    and z0.s, z0.s, #0xffff
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    uabd z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
@@ -231,8 +232,8 @@ define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
 define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
 ; CHECK-LABEL: uabd_d_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    and z1.d, z1.d, #0xffffffff
+; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    uabd z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    ret
@@ -265,8 +266,8 @@ define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <v
 define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a, <vscale x 4 x i16> %b) #0 {
 ; CHECK-LABEL: uabd_non_matching_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and z0.s, z0.s, #0xff
 ; CHECK-NEXT:    and z1.s, z1.s, #0xffff
+; CHECK-NEXT:    and z0.s, z0.s, #0xff
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    uabd z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AMDGPU/sad.ll b/llvm/test/CodeGen/AMDGPU/sad.ll
index 1b03065592956..137c458aa50eb 100644
--- a/llvm/test/CodeGen/AMDGPU/sad.ll
+++ b/llvm/test/CodeGen/AMDGPU/sad.ll
@@ -1,8 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -earlycse-debug-hash -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
 
-; GCN-LABEL: {{^}}v_sad_u32_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_sad_u32 v2, s0, v0, v1
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %t0 = select i1 %icmp0, i32 %a, i32 %b
 
@@ -16,9 +27,18 @@ define amdgpu_kernel void @v_sad_u32_pat1(ptr addrspace(1) %out, i32 %a, i32 %b,
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_constant_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, 20
 define amdgpu_kernel void @v_sad_u32_constant_pat1(ptr addrspace(1) %out, i32 %a) {
+; GCN-LABEL: v_sad_u32_constant_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s2, s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT:    v_mov_b32_e32 v0, 0x5a
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_sad_u32 v2, s2, v0, 20
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, 90
   %t0 = select i1 %icmp0, i32 %a, i32 90
 
@@ -32,9 +52,19 @@ define amdgpu_kernel void @v_sad_u32_constant_pat1(ptr addrspace(1) %out, i32 %a
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_pat2:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_sad_u32 v2, s0, v0, v1
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %sub0 = sub i32 %a, %b
   %sub1 = sub i32 %b, %a
@@ -46,12 +76,28 @@ define amdgpu_kernel void @v_sad_u32_pat2(ptr addrspace(1) %out, i32 %a, i32 %b,
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_sub_pat1:
-; GCN: s_max_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_min_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_sub_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_add_u32 s8, s8, s7
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_min_u32 s3, s0, s1
+; GCN-NEXT:    s_max_u32 s0, s0, s1
+; GCN-NEXT:    s_sub_i32 s0, s0, s3
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    s_add_i32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %t0 = select i1 %icmp0, i32 %a, i32 %b
 
@@ -66,9 +112,25 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat1(ptr addrspace(1) %out, i
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_add_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_multi_use_add_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_add_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_add_u32 s8, s8, s7
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-NEXT:    v_mov_b32_e32 v3, s2
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    v_sad_u32 v2, s0, v2, v3
+; GCN-NEXT:    buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %t0 = select i1 %icmp0, i32 %a, i32 %b
 
@@ -82,9 +144,27 @@ define amdgpu_kernel void @v_sad_u32_multi_use_add_pat1(ptr addrspace(1) %out, i
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_max_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_multi_use_max_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_max_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_add_u32 s8, s8, s7
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_max_u32 s3, s0, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-NEXT:    v_sad_u32 v3, s0, v0, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    flat_store_dword v[0:1], v3
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %t0 = select i1 %icmp0, i32 %a, i32 %b
   store volatile i32 %t0, ptr addrspace(5) undef
@@ -99,9 +179,27 @@ define amdgpu_kernel void @v_sad_u32_multi_use_max_pat1(ptr addrspace(1) %out, i
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_min_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_multi_use_min_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_min_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_add_u32 s8, s8, s7
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_min_u32 s3, s0, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-NEXT:    v_sad_u32 v3, s0, v0, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    flat_store_dword v[0:1], v3
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %t0 = select i1 %icmp0, i32 %a, i32 %b
 
@@ -117,9 +215,27 @@ define amdgpu_kernel void @v_sad_u32_multi_use_min_pat1(ptr addrspace(1) %out, i
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_sub_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_sub_pat2:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_add_u32 s8, s8, s7
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_sub_i32 s3, s0, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-NEXT:    v_sad_u32 v3, s0, v0, v1
+; GCN-NEXT:    buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    flat_store_dword v[0:1], v3
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %sub0 = sub i32 %a, %b
   store volatile i32 %sub0, ptr addrspace(5) undef
@@ -132,11 +248,29 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(ptr addrspace(1) %out, i
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_multi_use_select_pat2:
-; GCN-DAG: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cmp_gt_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: v_sad_u32_multi_use_select_pat2:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; GCN-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_add_u32 s8, s8, s7
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_sub_i32 s3, s0, s1
+; GCN-NEXT:    s_sub_i32 s6, s1, s0
+; GCN-NEXT:    s_cmp_gt_u32 s0, s1
+; GCN-NEXT:    s_cselect_b32 s0, s3, s6
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    s_add_i32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    buffer_store_dword v2, v0, s[8:11], 0 offen
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %sub0 = sub i32 %a, %b
   %sub1 = sub i32 %b, %a
@@ -149,12 +283,29 @@ define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(ptr addrspace(1) %out
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_vector_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_vector_pat1(ptr addrspace(1) %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; GCN-LABEL: v_sad_u32_vector_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x4
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0xc
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s15
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    v_mov_b32_e32 v2, s14
+; GCN-NEXT:    v_sad_u32 v3, s11, v0, v1
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_sad_u32 v2, s10, v2, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_sad_u32 v1, s9, v0, v1
+; GCN-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-NEXT:    v_mov_b32_e32 v4, s0
+; GCN-NEXT:    v_sad_u32 v0, s8, v0, v4
+; GCN-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt <4 x i32> %a, %b
   %t0 = select <4 x i1> %icmp0, <4 x i32> %a, <4 x i32> %b
 
@@ -168,12 +319,29 @@ define amdgpu_kernel void @v_sad_u32_vector_pat1(ptr addrspace(1) %out, <4 x i32
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_vector_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_vector_pat2(ptr addrspace(1) %out, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; GCN-LABEL: v_sad_u32_vector_pat2:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x4
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0xc
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s15
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    v_mov_b32_e32 v2, s14
+; GCN-NEXT:    v_sad_u32 v3, s11, v0, v1
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_sad_u32 v2, s10, v2, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_sad_u32 v1, s9, v0, v1
+; GCN-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-NEXT:    v_mov_b32_e32 v4, s0
+; GCN-NEXT:    v_sad_u32 v0, s8, v0, v4
+; GCN-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt <4 x i32> %a, %b
   %sub0 = sub <4 x i32> %a, %b
   %sub1 = sub <4 x i32> %b, %a
@@ -185,10 +353,22 @@ define amdgpu_kernel void @v_sad_u32_vector_pat2(ptr addrspace(1) %out, <4 x i32
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_i16_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_i16_pat1(ptr addrspace(1) %out, i16 %a, i16 %b, i16 %c) {
-
+; GCN-LABEL: v_sad_u32_i16_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s6, s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s4, s6, 0xffff
+; GCN-NEXT:    s_lshr_b32 s0, s0, 16
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    v_sad_u32 v2, s4, v1, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-NEXT:    flat_store_short v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i16 %a, %b
   %t0 = select i1 %icmp0, i16 %a, i16 %b
 
@@ -202,9 +382,22 @@ define amdgpu_kernel void @v_sad_u32_i16_pat1(ptr addrspace(1) %out, i16 %a, i16
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_i16_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_i16_pat2(ptr addrspace(1) %out) {
+; GCN-LABEL: v_sad_u32_i16_pat2:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    flat_load_ushort v0, v[0:1] glc
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT:    flat_load_ushort v1, v[0:1] glc
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    flat_load_ushort v2, v[0:1] glc
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_sad_u32 v2, v0, v1, v2
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_short v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %a = load volatile i16, ptr addrspace(1) undef
   %b = load volatile i16, ptr addrspace(1) undef
   %c = load volatile i16, ptr addrspace(1) undef
@@ -219,9 +412,22 @@ define amdgpu_kernel void @v_sad_u32_i16_pat2(ptr addrspace(1) %out) {
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_i8_pat1:
-; GCN: v_sad_u32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_i8_pat1(ptr addrspace(1) %out, i8 %a, i8 %b, i8 %c) {
+; GCN-LABEL: v_sad_u32_i8_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s2, s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_and_b32 s3, s2, 0xff
+; GCN-NEXT:    s_bfe_u32 s4, s2, 0x80008
+; GCN-NEXT:    s_lshr_b32 s2, s2, 16
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_sad_u32 v2, s3, v0, v1
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_byte v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i8 %a, %b
   %t0 = select i1 %icmp0, i8 %a, i8 %b
 
@@ -235,9 +441,22 @@ define amdgpu_kernel void @v_sad_u32_i8_pat1(ptr addrspace(1) %out, i8 %a, i8 %b
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_i8_pat2:
-; GCN: v_sad_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_i8_pat2(ptr addrspace(1) %out) {
+; GCN-LABEL: v_sad_u32_i8_pat2:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    flat_load_ubyte v0, v[0:1] glc
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT:    flat_load_ubyte v1, v[0:1] glc
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    flat_load_ubyte v2, v[0:1] glc
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_sad_u32 v2, v0, v1, v2
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    flat_store_byte v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %a = load volatile i8, ptr addrspace(1) undef
   %b = load volatile i8, ptr addrspace(1) undef
   %c = load volatile i8, ptr addrspace(1) undef
@@ -252,15 +471,26 @@ define amdgpu_kernel void @v_sad_u32_i8_pat2(ptr addrspace(1) %out) {
   ret void
 }
 
-; GCN-LABEL: {{^}}s_sad_u32_i8_pat2:
-; GCN: s_load_dword
-; GCN-DAG: s_bfe_u32
-; GCN-DAG: s_sub_i32
-; GCN-DAG: s_and_b32
-; GCN-DAG: s_sub_i32
-; GCN-DAG: s_lshr_b32
-; GCN: s_add_i32
 define amdgpu_kernel void @s_sad_u32_i8_pat2(ptr addrspace(1) %out, i8 zeroext %a, i8 zeroext %b, i8 zeroext %c) {
+; GCN-LABEL: s_sad_u32_i8_pat2:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dword s2, s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshr_b32 s4, s2, 8
+; GCN-NEXT:    s_and_b32 s3, s2, 0xff
+; GCN-NEXT:    s_bfe_u32 s5, s2, 0x80008
+; GCN-NEXT:    s_lshr_b32 s6, s2, 16
+; GCN-NEXT:    s_sub_i32 s7, s2, s4
+; GCN-NEXT:    s_sub_i32 s2, s4, s2
+; GCN-NEXT:    s_cmp_gt_u32 s3, s5
+; GCN-NEXT:    s_cselect_b32 s2, s7, s2
+; GCN-NEXT:    s_add_i32 s2, s2, s6
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    flat_store_byte v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i8 %a, %b
   %sub0 = sub i8 %a, %b
   %sub1 = sub i8 %b, %a
@@ -272,12 +502,22 @@ define amdgpu_kernel void @s_sad_u32_i8_pat2(ptr addrspace(1) %out, i8 zeroext %
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_mismatched_operands_pat1:
-; GCN-DAG: s_cmp_le_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_max_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat1(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+; GCN-LABEL: v_sad_u32_mismatched_operands_pat1:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_max_u32 s6, s0, s1
+; GCN-NEXT:    s_cmp_le_u32 s0, s1
+; GCN-NEXT:    s_cselect_b32 s0, s0, s3
+; GCN-NEXT:    s_sub_i32 s0, s6, s0
+; GCN-NEXT:    s_add_i32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %t0 = select i1 %icmp0, i32 %a, i32 %b
 
@@ -291,11 +531,22 @@ define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat1(ptr addrspace(1) %
   ret void
 }
 
-; GCN-LABEL: {{^}}v_sad_u32_mismatched_operands_pat2:
-; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat2(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+; GCN-LABEL: v_sad_u32_mismatched_operands_pat2:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_sub_i32 s3, s0, s3
+; GCN-NEXT:    s_sub_i32 s6, s1, s0
+; GCN-NEXT:    s_cmp_lt_u32 s1, s0
+; GCN-NEXT:    s_cselect_b32 s0, s3, s6
+; GCN-NEXT:    s_add_i32 s0, s0, s2
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    flat_store_dword v[0:1], v2
+; GCN-NEXT:    s_endpgm
   %icmp0 = icmp ugt i32 %a, %b
   %sub0 = sub i32 %a, %d
   %sub1 = sub i32 %b, %a
diff --git a/llvm/test/CodeGen/ARM/iabs.ll b/llvm/test/CodeGen/ARM/iabs.ll
index fffa9555b2966..950320efb61a2 100644
--- a/llvm/test/CodeGen/ARM/iabs.ll
+++ b/llvm/test/CodeGen/ARM/iabs.ll
@@ -26,8 +26,9 @@ define i32 @test(i32 %a) {
 define i32 @test2(i32 %a, i32 %b) nounwind readnone ssp {
 ; CHECK-LABEL: test2:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    subs r0, r0, r1
-; CHECK-NEXT:    rsbmi r0, r0, #0
+; CHECK-NEXT:    subs r2, r1, r0
+; CHECK-NEXT:    sublt r2, r0, r1
+; CHECK-NEXT:    mov r0, r2
 ; CHECK-NEXT:    bx lr
 entry:
   %sub = sub nsw i32 %a, %b
diff --git a/llvm/test/CodeGen/ARM/neon_vabs.ll b/llvm/test/CodeGen/ARM/neon_vabs.ll
index 4064aae65f665..37d389441d217 100644
--- a/llvm/test/CodeGen/ARM/neon_vabs.ll
+++ b/llvm/test/CodeGen/ARM/neon_vabs.ll
@@ -149,7 +149,10 @@ define <4 x i32> @test11(<4 x i16> %a, <4 x i16> %b) nounwind {
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmov d16, r2, r3
 ; CHECK-NEXT:    vmov d17, r0, r1
-; CHECK-NEXT:    vabdl.u16 q8, d17, d16
+; CHECK-NEXT:    vmin.u16 d18, d17, d16
+; CHECK-NEXT:    vmax.u16 d16, d17, d16
+; CHECK-NEXT:    vsub.i16 d16, d16, d18
+; CHECK-NEXT:    vmovl.u16 q8, d16
 ; CHECK-NEXT:    vmov r0, r1, d16
 ; CHECK-NEXT:    vmov r2, r3, d17
 ; CHECK-NEXT:    mov pc, lr
@@ -166,7 +169,10 @@ define <8 x i16> @test12(<8 x i8> %a, <8 x i8> %b) nounwind {
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    vmov d16, r2, r3
 ; CHECK-NEXT:    vmov d17, r0, r1
-; CHECK-NEXT:    vabdl.u8 q8, d17, d16
+; CHECK-NEXT:    vmin.u8 d18, d17, d16
+; CHECK-NEXT:    vmax.u8 d16, d17, d16
+; CHECK-NEXT:    vsub.i8 d16, d16, d18
+; CHECK-NEXT:    vmovl.u8 q8, d16
 ; CHECK-NEXT:    vmov r0, r1, d16
 ; CHECK-NEXT:    vmov r2, r3, d17
 ; CHECK-NEXT:    mov pc, lr
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
index 7a6640fea2d1e..4410154b7516e 100644
--- a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll
@@ -72,41 +72,51 @@ entry:
 define <2 x i64> @sub_absv_64(<2 x i64> %a, <2 x i64> %b) local_unnamed_addr {
 ; CHECK-PWR9-LABEL: sub_absv_64:
 ; CHECK-PWR9:       # %bb.0: # %entry
-; CHECK-PWR9-NEXT:    vsubudm v2, v2, v3
-; CHECK-PWR9-NEXT:    vnegd v3, v2
+; CHECK-PWR9-NEXT:    vminsd v4, v2, v3
 ; CHECK-PWR9-NEXT:    vmaxsd v2, v2, v3
+; CHECK-PWR9-NEXT:    vsubudm v2, v2, v4
 ; CHECK-PWR9-NEXT:    blr
 ;
 ; CHECK-PWR8-LABEL: sub_absv_64:
 ; CHECK-PWR8:       # %bb.0: # %entry
-; CHECK-PWR8-NEXT:    vsubudm v2, v2, v3
-; CHECK-PWR8-NEXT:    xxlxor v3, v3, v3
-; CHECK-PWR8-NEXT:    vsubudm v3, v3, v2
+; CHECK-PWR8-NEXT:    vminsd v4, v2, v3
 ; CHECK-PWR8-NEXT:    vmaxsd v2, v2, v3
+; CHECK-PWR8-NEXT:    vsubudm v2, v2, v4
 ; CHECK-PWR8-NEXT:    blr
 ;
 ; CHECK-PWR7-LABEL: sub_absv_64:
 ; CHECK-PWR7:       # %bb.0: # %entry
-; CHECK-PWR7-NEXT:    addi r3, r1, -48
+; CHECK-PWR7-NEXT:    addi r3, r1, -80
+; CHECK-PWR7-NEXT:    addi r4, r1, -64
 ; CHECK-PWR7-NEXT:    stxvd2x v2, 0, r3
-; CHECK-PWR7-NEXT:    addi r3, r1, -32
-; CHECK-PWR7-NEXT:    stxvd2x v3, 0, r3
-; CHECK-PWR7-NEXT:    ld r4, -40(r1)
-; CHECK-PWR7-NEXT:    ld r5, -24(r1)
-; CHECK-PWR7-NEXT:    ld r3, -48(r1)
-; CHECK-PWR7-NEXT:    sub r4, r4, r5
-; CHECK-PWR7-NEXT:    sradi r5, r4, 63
-; CHECK-PWR7-NEXT:    xor r4, r4, r5
-; CHECK-PWR7-NEXT:    sub r4, r4, r5
-; CHECK-PWR7-NEXT:    ld r5, -32(r1)
-; CHECK-PWR7-NEXT:    std r4, -8(r1)
-; CHECK-PWR7-NEXT:    sub r3, r3, r5
-; CHECK-PWR7-NEXT:    sradi r4, r3, 63
-; CHECK-PWR7-NEXT:    xor r3, r3, r4
-; CHECK-PWR7-NEXT:    sub r3, r3, r4
+; CHECK-PWR7-NEXT:    stxvd2x v3, 0, r4
+; CHECK-PWR7-NEXT:    ld r3, -72(r1)
+; CHECK-PWR7-NEXT:    ld r4, -56(r1)
+; CHECK-PWR7-NEXT:    ld r6, -64(r1)
+; CHECK-PWR7-NEXT:    sub r5, r3, r4
+; CHECK-PWR7-NEXT:    cmpd r3, r4
+; CHECK-PWR7-NEXT:    std r5, -24(r1)
+; CHECK-PWR7-NEXT:    ld r5, -80(r1)
+; CHECK-PWR7-NEXT:    sub r7, r5, r6
+; CHECK-PWR7-NEXT:    std r7, -32(r1)
+; CHECK-PWR7-NEXT:    sub r7, r4, r3
+; CHECK-PWR7-NEXT:    li r3, 0
+; CHECK-PWR7-NEXT:    li r4, -1
+; CHECK-PWR7-NEXT:    std r7, -40(r1)
+; CHECK-PWR7-NEXT:    sub r7, r6, r5
+; CHECK-PWR7-NEXT:    std r7, -48(r1)
+; CHECK-PWR7-NEXT:    iselgt r7, r4, r3
+; CHECK-PWR7-NEXT:    cmpd r5, r6
+; CHECK-PWR7-NEXT:    std r7, -8(r1)
+; CHECK-PWR7-NEXT:    iselgt r3, r4, r3
 ; CHECK-PWR7-NEXT:    std r3, -16(r1)
-; CHECK-PWR7-NEXT:    addi r3, r1, -16
+; CHECK-PWR7-NEXT:    addi r3, r1, -32
 ; CHECK-PWR7-NEXT:    lxvd2x v2, 0, r3
+; CHECK-PWR7-NEXT:    addi r3, r1, -48
+; CHECK-PWR7-NEXT:    lxvd2x v3, 0, r3
+; CHECK-PWR7-NEXT:    addi r3, r1, -16
+; CHECK-PWR7-NEXT:    lxvd2x v4, 0, r3
+; CHECK-PWR7-NEXT:    xxsel v2, v3, v2, v4
 ; CHECK-PWR7-NEXT:    blr
 entry:
   %0 = sub nsw <2 x i64> %a, %b
@@ -127,10 +137,9 @@ define <4 x i32> @sub_absv_32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr {
 ;
 ; CHECK-PWR78-LABEL: sub_absv_32:
 ; CHECK-PWR78:       # %bb.0: # %entry
-; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v3
-; CHECK-PWR78-NEXT:    xxlxor v3, v3, v3
-; CHECK-PWR78-NEXT:    vsubuwm v3, v3, v2
+; CHECK-PWR78-NEXT:    vminsw v4, v2, v3
 ; CHECK-PWR78-NEXT:    vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
 entry:
   %0 = sub nsw <4 x i32> %a, %b
@@ -143,10 +152,9 @@ entry:
 define <8 x i16> @sub_absv_16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
 ; CHECK-LABEL: sub_absv_16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsubuhm v2, v2, v3
-; CHECK-NEXT:    xxlxor v3, v3, v3
-; CHECK-NEXT:    vsubuhm v3, v3, v2
+; CHECK-NEXT:    vminsh v4, v2, v3
 ; CHECK-NEXT:    vmaxsh v2, v2, v3
+; CHECK-NEXT:    vsubuhm v2, v2, v4
 ; CHECK-NEXT:    blr
 entry:
   %0 = sub nsw <8 x i16> %a, %b
@@ -159,10 +167,9 @@ entry:
 define <16 x i8> @sub_absv_8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr {
 ; CHECK-LABEL: sub_absv_8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsububm v2, v2, v3
-; CHECK-NEXT:    xxlxor v3, v3, v3
-; CHECK-NEXT:    vsububm v3, v3, v2
+; CHECK-NEXT:    vminsb v4, v2, v3
 ; CHECK-NEXT:    vmaxsb v2, v2, v3
+; CHECK-NEXT:    vsububm v2, v2, v4
 ; CHECK-NEXT:    blr
 entry:
   %0 = sub nsw <16 x i8> %a, %b
@@ -178,76 +185,12 @@ entry:
 ; Therefore, we end up doing more work than is required with a pair of abs for word
 ;  instead of just one for the halfword.
 define <8 x i16> @sub_absv_16_ext(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
-; CHECK-PWR9-LABEL: sub_absv_16_ext:
-; CHECK-PWR9:       # %bb.0: # %entry
-; CHECK-PWR9-NEXT:    vmrghh v4, v2, v2
-; CHECK-PWR9-NEXT:    vmrglh v2, v2, v2
-; CHECK-PWR9-NEXT:    vmrghh v5, v3, v3
-; CHECK-PWR9-NEXT:    vmrglh v3, v3, v3
-; CHECK-PWR9-NEXT:    vextsh2w v2, v2
-; CHECK-PWR9-NEXT:    vextsh2w v3, v3
-; CHECK-PWR9-NEXT:    vextsh2w v4, v4
-; CHECK-PWR9-NEXT:    vextsh2w v5, v5
-; CHECK-PWR9-NEXT:    xvnegsp v3, v3
-; CHECK-PWR9-NEXT:    xvnegsp v2, v2
-; CHECK-PWR9-NEXT:    xvnegsp v4, v4
-; CHECK-PWR9-NEXT:    vabsduw v2, v2, v3
-; CHECK-PWR9-NEXT:    xvnegsp v3, v5
-; CHECK-PWR9-NEXT:    vabsduw v3, v4, v3
-; CHECK-PWR9-NEXT:    vpkuwum v2, v3, v2
-; CHECK-PWR9-NEXT:    blr
-;
-; CHECK-PWR8-LABEL: sub_absv_16_ext:
-; CHECK-PWR8:       # %bb.0: # %entry
-; CHECK-PWR8-NEXT:    vspltisw v4, 8
-; CHECK-PWR8-NEXT:    vmrglh v5, v2, v2
-; CHECK-PWR8-NEXT:    vadduwm v4, v4, v4
-; CHECK-PWR8-NEXT:    vmrghh v2, v2, v2
-; CHECK-PWR8-NEXT:    vmrglh v0, v3, v3
-; CHECK-PWR8-NEXT:    vmrghh v3, v3, v3
-; CHECK-PWR8-NEXT:    vslw v5, v5, v4
-; CHECK-PWR8-NEXT:    vslw v2, v2, v4
-; CHECK-PWR8-NEXT:    vslw v0, v0, v4
-; CHECK-PWR8-NEXT:    vslw v3, v3, v4
-; CHECK-PWR8-NEXT:    vsraw v5, v5, v4
-; CHECK-PWR8-NEXT:    vsraw v2, v2, v4
-; CHECK-PWR8-NEXT:    vsraw v0, v0, v4
-; CHECK-PWR8-NEXT:    vsraw v3, v3, v4
-; CHECK-PWR8-NEXT:    xxlxor v4, v4, v4
-; CHECK-PWR8-NEXT:    vsubuwm v2, v2, v3
-; CHECK-PWR8-NEXT:    vsubuwm v3, v5, v0
-; CHECK-PWR8-NEXT:    vsubuwm v5, v4, v3
-; CHECK-PWR8-NEXT:    vsubuwm v4, v4, v2
-; CHECK-PWR8-NEXT:    vmaxsw v3, v3, v5
-; CHECK-PWR8-NEXT:    vmaxsw v2, v2, v4
-; CHECK-PWR8-NEXT:    vpkuwum v2, v2, v3
-; CHECK-PWR8-NEXT:    blr
-;
-; CHECK-PWR7-LABEL: sub_absv_16_ext:
-; CHECK-PWR7:       # %bb.0: # %entry
-; CHECK-PWR7-NEXT:    vspltisw v4, 8
-; CHECK-PWR7-NEXT:    vmrglh v5, v2, v2
-; CHECK-PWR7-NEXT:    vmrghh v2, v2, v2
-; CHECK-PWR7-NEXT:    vmrglh v0, v3, v3
-; CHECK-PWR7-NEXT:    vmrghh v3, v3, v3
-; CHECK-PWR7-NEXT:    vadduwm v4, v4, v4
-; CHECK-PWR7-NEXT:    vslw v5, v5, v4
-; CHECK-PWR7-NEXT:    vslw v2, v2, v4
-; CHECK-PWR7-NEXT:    vslw v0, v0, v4
-; CHECK-PWR7-NEXT:    vslw v3, v3, v4
-; CHECK-PWR7-NEXT:    vsraw v5, v5, v4
-; CHECK-PWR7-NEXT:    vsraw v2, v2, v4
-; CHECK-PWR7-NEXT:    vsraw v0, v0, v4
-; CHECK-PWR7-NEXT:    vsraw v3, v3, v4
-; CHECK-PWR7-NEXT:    xxlxor v4, v4, v4
-; CHECK-PWR7-NEXT:    vsubuwm v2, v2, v3
-; CHECK-PWR7-NEXT:    vsubuwm v3, v5, v0
-; CHECK-PWR7-NEXT:    vsubuwm v5, v4, v3
-; CHECK-PWR7-NEXT:    vsubuwm v4, v4, v2
-; CHECK-PWR7-NEXT:    vmaxsw v3, v3, v5
-; CHECK-PWR7-NEXT:    vmaxsw v2, v2, v4
-; CHECK-PWR7-NEXT:    vpkuwum v2, v2, v3
-; CHECK-PWR7-NEXT:    blr
+; CHECK-LABEL: sub_absv_16_ext:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vminsh v4, v2, v3
+; CHECK-NEXT:    vmaxsh v2, v2, v3
+; CHECK-NEXT:    vsubuhm v2, v2, v4
+; CHECK-NEXT:    blr
 entry:
   %0 = sext <8 x i16> %a to <8 x i32>
   %1 = sext <8 x i16> %b to <8 x i32>
@@ -266,169 +209,173 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
 ; CHECK-PWR9-LE-LABEL: sub_absv_8_ext:
 ; CHECK-PWR9-LE:       # %bb.0: # %entry
 ; CHECK-PWR9-LE-NEXT:    li r3, 0
-; CHECK-PWR9-LE-NEXT:    li r5, 2
 ; CHECK-PWR9-LE-NEXT:    li r4, 1
+; CHECK-PWR9-LE-NEXT:    li r5, 2
 ; CHECK-PWR9-LE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
 ; CHECK-PWR9-LE-NEXT:    vextubrx r6, r3, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r3, r3, v3
-; CHECK-PWR9-LE-NEXT:    vextubrx r8, r5, v2
-; CHECK-PWR9-LE-NEXT:    vextubrx r5, r5, v3
+; CHECK-PWR9-LE-NEXT:    vextubrx r7, r4, v2
+; CHECK-PWR9-LE-NEXT:    vextubrx r4, r4, v3
 ; CHECK-PWR9-LE-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
 ; CHECK-PWR9-LE-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
 ; CHECK-PWR9-LE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
 ; CHECK-PWR9-LE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
 ; CHECK-PWR9-LE-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
+; CHECK-PWR9-LE-NEXT:    std r24, -64(r1) # 8-byte Folded Spill
+; CHECK-PWR9-LE-NEXT:    vextubrx r8, r5, v2
+; CHECK-PWR9-LE-NEXT:    std r23, -72(r1) # 8-byte Folded Spill
+; CHECK-PWR9-LE-NEXT:    vextubrx r5, r5, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r6, r6, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r3, r3, 24
-; CHECK-PWR9-LE-NEXT:    clrlwi r8, r8, 24
-; CHECK-PWR9-LE-NEXT:    clrlwi r5, r5, 24
-; CHECK-PWR9-LE-NEXT:    vextubrx r7, r4, v2
-; CHECK-PWR9-LE-NEXT:    vextubrx r4, r4, v3
-; CHECK-PWR9-LE-NEXT:    sub r3, r6, r3
-; CHECK-PWR9-LE-NEXT:    sub r6, r8, r5
+; CHECK-PWR9-LE-NEXT:    cmplw r6, r3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r7, r7, 24
-; CHECK-PWR9-LE-NEXT:    clrlwi r4, r4, 24
-; CHECK-PWR9-LE-NEXT:    sub r4, r7, r4
-; CHECK-PWR9-LE-NEXT:    srawi r5, r3, 31
-; CHECK-PWR9-LE-NEXT:    srawi r7, r4, 31
-; CHECK-PWR9-LE-NEXT:    xor r3, r3, r5
-; CHECK-PWR9-LE-NEXT:    xor r4, r4, r7
-; CHECK-PWR9-LE-NEXT:    sub r5, r3, r5
-; CHECK-PWR9-LE-NEXT:    srawi r3, r6, 31
-; CHECK-PWR9-LE-NEXT:    sub r4, r4, r7
-; CHECK-PWR9-LE-NEXT:    xor r6, r6, r3
-; CHECK-PWR9-LE-NEXT:    sub r3, r6, r3
+; CHECK-PWR9-LE-NEXT:    clrlwi r9, r4, 24
+; CHECK-PWR9-LE-NEXT:    clrlwi r10, r5, 24
+; CHECK-PWR9-LE-NEXT:    sub r4, r3, r6
+; CHECK-PWR9-LE-NEXT:    sub r5, r6, r3
+; CHECK-PWR9-LE-NEXT:    sub r3, r9, r7
+; CHECK-PWR9-LE-NEXT:    sub r6, r7, r9
+; CHECK-PWR9-LE-NEXT:    clrlwi r8, r8, 24
+; CHECK-PWR9-LE-NEXT:    iselgt r4, r5, r4
+; CHECK-PWR9-LE-NEXT:    cmplw r7, r9
+; CHECK-PWR9-LE-NEXT:    sub r7, r10, r8
+; CHECK-PWR9-LE-NEXT:    iselgt r5, r6, r3
+; CHECK-PWR9-LE-NEXT:    sub r3, r8, r10
+; CHECK-PWR9-LE-NEXT:    cmplw r8, r10
 ; CHECK-PWR9-LE-NEXT:    li r6, 3
+; CHECK-PWR9-LE-NEXT:    iselgt r3, r3, r7
 ; CHECK-PWR9-LE-NEXT:    vextubrx r7, r6, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r6, r6, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r7, r7, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r6, r6, 24
-; CHECK-PWR9-LE-NEXT:    sub r6, r7, r6
-; CHECK-PWR9-LE-NEXT:    srawi r7, r6, 31
-; CHECK-PWR9-LE-NEXT:    xor r6, r6, r7
-; CHECK-PWR9-LE-NEXT:    sub r6, r6, r7
+; CHECK-PWR9-LE-NEXT:    sub r8, r6, r7
+; CHECK-PWR9-LE-NEXT:    sub r9, r7, r6
+; CHECK-PWR9-LE-NEXT:    cmplw r7, r6
 ; CHECK-PWR9-LE-NEXT:    li r7, 4
+; CHECK-PWR9-LE-NEXT:    iselgt r6, r9, r8
 ; CHECK-PWR9-LE-NEXT:    vextubrx r8, r7, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r7, r7, v3
 ; CHECK-PWR9-LE-NEXT:    mtvsrd v4, r6
 ; CHECK-PWR9-LE-NEXT:    clrlwi r8, r8, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r7, r7, 24
-; CHECK-PWR9-LE-NEXT:    sub r7, r8, r7
-; CHECK-PWR9-LE-NEXT:    srawi r8, r7, 31
-; CHECK-PWR9-LE-NEXT:    xor r7, r7, r8
-; CHECK-PWR9-LE-NEXT:    sub r7, r7, r8
+; CHECK-PWR9-LE-NEXT:    sub r9, r7, r8
+; CHECK-PWR9-LE-NEXT:    sub r10, r8, r7
+; CHECK-PWR9-LE-NEXT:    cmplw r8, r7
 ; CHECK-PWR9-LE-NEXT:    li r8, 5
+; CHECK-PWR9-LE-NEXT:    iselgt r7, r10, r9
 ; CHECK-PWR9-LE-NEXT:    vextubrx r9, r8, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r8, r8, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r9, r9, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r8, r8, 24
-; CHECK-PWR9-LE-NEXT:    sub r8, r9, r8
-; CHECK-PWR9-LE-NEXT:    srawi r9, r8, 31
-; CHECK-PWR9-LE-NEXT:    xor r8, r8, r9
-; CHECK-PWR9-LE-NEXT:    sub r8, r8, r9
+; CHECK-PWR9-LE-NEXT:    sub r10, r8, r9
+; CHECK-PWR9-LE-NEXT:    sub r11, r9, r8
+; CHECK-PWR9-LE-NEXT:    cmplw r9, r8
 ; CHECK-PWR9-LE-NEXT:    li r9, 6
+; CHECK-PWR9-LE-NEXT:    iselgt r8, r11, r10
 ; CHECK-PWR9-LE-NEXT:    vextubrx r10, r9, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r9, r9, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r10, r10, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r9, r9, 24
-; CHECK-PWR9-LE-NEXT:    sub r9, r10, r9
-; CHECK-PWR9-LE-NEXT:    srawi r10, r9, 31
-; CHECK-PWR9-LE-NEXT:    xor r9, r9, r10
-; CHECK-PWR9-LE-NEXT:    sub r9, r9, r10
+; CHECK-PWR9-LE-NEXT:    sub r11, r9, r10
+; CHECK-PWR9-LE-NEXT:    sub r12, r10, r9
+; CHECK-PWR9-LE-NEXT:    cmplw r10, r9
 ; CHECK-PWR9-LE-NEXT:    li r10, 7
+; CHECK-PWR9-LE-NEXT:    iselgt r9, r12, r11
 ; CHECK-PWR9-LE-NEXT:    vextubrx r11, r10, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r10, r10, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r11, r11, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r10, r10, 24
-; CHECK-PWR9-LE-NEXT:    sub r10, r11, r10
-; CHECK-PWR9-LE-NEXT:    srawi r11, r10, 31
-; CHECK-PWR9-LE-NEXT:    xor r10, r10, r11
-; CHECK-PWR9-LE-NEXT:    sub r10, r10, r11
+; CHECK-PWR9-LE-NEXT:    sub r12, r10, r11
+; CHECK-PWR9-LE-NEXT:    sub r30, r11, r10
+; CHECK-PWR9-LE-NEXT:    cmplw r11, r10
 ; CHECK-PWR9-LE-NEXT:    li r11, 8
+; CHECK-PWR9-LE-NEXT:    iselgt r10, r30, r12
 ; CHECK-PWR9-LE-NEXT:    vextubrx r12, r11, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r11, r11, v3
 ; CHECK-PWR9-LE-NEXT:    mtvsrd v5, r10
 ; CHECK-PWR9-LE-NEXT:    clrlwi r12, r12, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r11, r11, 24
-; CHECK-PWR9-LE-NEXT:    sub r11, r12, r11
-; CHECK-PWR9-LE-NEXT:    srawi r12, r11, 31
-; CHECK-PWR9-LE-NEXT:    xor r11, r11, r12
-; CHECK-PWR9-LE-NEXT:    sub r11, r11, r12
+; CHECK-PWR9-LE-NEXT:    sub r30, r11, r12
+; CHECK-PWR9-LE-NEXT:    sub r29, r12, r11
+; CHECK-PWR9-LE-NEXT:    cmplw r12, r11
 ; CHECK-PWR9-LE-NEXT:    li r12, 9
 ; CHECK-PWR9-LE-NEXT:    vextubrx r0, r12, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r12, r12, v3
+; CHECK-PWR9-LE-NEXT:    iselgt r11, r29, r30
 ; CHECK-PWR9-LE-NEXT:    clrlwi r0, r0, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r12, r12, 24
-; CHECK-PWR9-LE-NEXT:    sub r12, r0, r12
-; CHECK-PWR9-LE-NEXT:    srawi r0, r12, 31
-; CHECK-PWR9-LE-NEXT:    xor r12, r12, r0
-; CHECK-PWR9-LE-NEXT:    sub r12, r12, r0
+; CHECK-PWR9-LE-NEXT:    sub r30, r12, r0
+; CHECK-PWR9-LE-NEXT:    sub r29, r0, r12
+; CHECK-PWR9-LE-NEXT:    cmplw r0, r12
 ; CHECK-PWR9-LE-NEXT:    li r0, 10
+; CHECK-PWR9-LE-NEXT:    iselgt r12, r29, r30
 ; CHECK-PWR9-LE-NEXT:    vextubrx r30, r0, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r0, r0, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r30, r30, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r0, r0, 24
-; CHECK-PWR9-LE-NEXT:    sub r0, r30, r0
-; CHECK-PWR9-LE-NEXT:    srawi r30, r0, 31
-; CHECK-PWR9-LE-NEXT:    xor r0, r0, r30
-; CHECK-PWR9-LE-NEXT:    sub r0, r0, r30
+; CHECK-PWR9-LE-NEXT:    sub r29, r0, r30
+; CHECK-PWR9-LE-NEXT:    sub r28, r30, r0
+; CHECK-PWR9-LE-NEXT:    cmplw r30, r0
 ; CHECK-PWR9-LE-NEXT:    li r30, 11
+; CHECK-PWR9-LE-NEXT:    iselgt r0, r28, r29
 ; CHECK-PWR9-LE-NEXT:    vextubrx r29, r30, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r30, r30, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r29, r29, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r30, r30, 24
-; CHECK-PWR9-LE-NEXT:    sub r30, r29, r30
-; CHECK-PWR9-LE-NEXT:    srawi r29, r30, 31
-; CHECK-PWR9-LE-NEXT:    xor r30, r30, r29
-; CHECK-PWR9-LE-NEXT:    sub r30, r30, r29
+; CHECK-PWR9-LE-NEXT:    sub r28, r30, r29
+; CHECK-PWR9-LE-NEXT:    sub r27, r29, r30
+; CHECK-PWR9-LE-NEXT:    cmplw r29, r30
 ; CHECK-PWR9-LE-NEXT:    li r29, 12
+; CHECK-PWR9-LE-NEXT:    iselgt r30, r27, r28
 ; CHECK-PWR9-LE-NEXT:    vextubrx r28, r29, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r29, r29, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r28, r28, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r29, r29, 24
-; CHECK-PWR9-LE-NEXT:    sub r29, r28, r29
-; CHECK-PWR9-LE-NEXT:    srawi r28, r29, 31
-; CHECK-PWR9-LE-NEXT:    xor r29, r29, r28
-; CHECK-PWR9-LE-NEXT:    sub r29, r29, r28
+; CHECK-PWR9-LE-NEXT:    sub r27, r29, r28
+; CHECK-PWR9-LE-NEXT:    sub r26, r28, r29
+; CHECK-PWR9-LE-NEXT:    cmplw r28, r29
 ; CHECK-PWR9-LE-NEXT:    li r28, 13
+; CHECK-PWR9-LE-NEXT:    iselgt r29, r26, r27
 ; CHECK-PWR9-LE-NEXT:    vextubrx r27, r28, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r28, r28, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r27, r27, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r28, r28, 24
-; CHECK-PWR9-LE-NEXT:    sub r28, r27, r28
-; CHECK-PWR9-LE-NEXT:    srawi r27, r28, 31
-; CHECK-PWR9-LE-NEXT:    xor r28, r28, r27
-; CHECK-PWR9-LE-NEXT:    sub r28, r28, r27
+; CHECK-PWR9-LE-NEXT:    sub r26, r28, r27
+; CHECK-PWR9-LE-NEXT:    sub r25, r27, r28
+; CHECK-PWR9-LE-NEXT:    cmplw r27, r28
 ; CHECK-PWR9-LE-NEXT:    li r27, 14
+; CHECK-PWR9-LE-NEXT:    iselgt r28, r25, r26
 ; CHECK-PWR9-LE-NEXT:    vextubrx r26, r27, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r27, r27, v3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r26, r26, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r27, r27, 24
-; CHECK-PWR9-LE-NEXT:    sub r27, r26, r27
-; CHECK-PWR9-LE-NEXT:    srawi r26, r27, 31
-; CHECK-PWR9-LE-NEXT:    xor r27, r27, r26
-; CHECK-PWR9-LE-NEXT:    sub r27, r27, r26
+; CHECK-PWR9-LE-NEXT:    sub r25, r27, r26
+; CHECK-PWR9-LE-NEXT:    sub r24, r26, r27
+; CHECK-PWR9-LE-NEXT:    cmplw r26, r27
 ; CHECK-PWR9-LE-NEXT:    li r26, 15
+; CHECK-PWR9-LE-NEXT:    iselgt r27, r24, r25
 ; CHECK-PWR9-LE-NEXT:    vextubrx r25, r26, v2
 ; CHECK-PWR9-LE-NEXT:    vextubrx r26, r26, v3
-; CHECK-PWR9-LE-NEXT:    mtvsrd v2, r5
-; CHECK-PWR9-LE-NEXT:    mtvsrd v3, r4
+; CHECK-PWR9-LE-NEXT:    mtvsrd v2, r4
+; CHECK-PWR9-LE-NEXT:    mtvsrd v3, r5
 ; CHECK-PWR9-LE-NEXT:    vmrghb v2, v3, v2
 ; CHECK-PWR9-LE-NEXT:    mtvsrd v3, r3
 ; CHECK-PWR9-LE-NEXT:    clrlwi r25, r25, 24
 ; CHECK-PWR9-LE-NEXT:    clrlwi r26, r26, 24
 ; CHECK-PWR9-LE-NEXT:    vmrghb v3, v4, v3
 ; CHECK-PWR9-LE-NEXT:    mtvsrd v4, r8
-; CHECK-PWR9-LE-NEXT:    sub r26, r25, r26
+; CHECK-PWR9-LE-NEXT:    sub r24, r26, r25
+; CHECK-PWR9-LE-NEXT:    sub r23, r25, r26
+; CHECK-PWR9-LE-NEXT:    cmplw r25, r26
+; CHECK-PWR9-LE-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
 ; CHECK-PWR9-LE-NEXT:    vmrglh v2, v3, v2
 ; CHECK-PWR9-LE-NEXT:    mtvsrd v3, r7
-; CHECK-PWR9-LE-NEXT:    srawi r25, r26, 31
+; CHECK-PWR9-LE-NEXT:    iselgt r26, r23, r24
+; CHECK-PWR9-LE-NEXT:    ld r24, -64(r1) # 8-byte Folded Reload
+; CHECK-PWR9-LE-NEXT:    ld r23, -72(r1) # 8-byte Folded Reload
 ; CHECK-PWR9-LE-NEXT:    vmrghb v3, v4, v3
 ; CHECK-PWR9-LE-NEXT:    mtvsrd v4, r9
-; CHECK-PWR9-LE-NEXT:    xor r26, r26, r25
 ; CHECK-PWR9-LE-NEXT:    vmrghb v4, v5, v4
-; CHECK-PWR9-LE-NEXT:    sub r26, r26, r25
-; CHECK-PWR9-LE-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
 ; CHECK-PWR9-LE-NEXT:    mtvsrd v5, r26
 ; CHECK-PWR9-LE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
 ; CHECK-PWR9-LE-NEXT:    vmrglh v3, v4, v3
@@ -469,153 +416,157 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
 ; CHECK-PWR9-BE-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
 ; CHECK-PWR9-BE-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
 ; CHECK-PWR9-BE-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
+; CHECK-PWR9-BE-NEXT:    std r24, -64(r1) # 8-byte Folded Spill
+; CHECK-PWR9-BE-NEXT:    vextublx r8, r5, v2
+; CHECK-PWR9-BE-NEXT:    std r23, -72(r1) # 8-byte Folded Spill
+; CHECK-PWR9-BE-NEXT:    vextublx r5, r5, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r6, r6, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r3, r3, 24
+; CHECK-PWR9-BE-NEXT:    sub r9, r3, r6
 ; CHECK-PWR9-BE-NEXT:    clrlwi r7, r7, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r4, r4, 24
-; CHECK-PWR9-BE-NEXT:    vextublx r8, r5, v2
-; CHECK-PWR9-BE-NEXT:    vextublx r5, r5, v3
-; CHECK-PWR9-BE-NEXT:    sub r3, r6, r3
-; CHECK-PWR9-BE-NEXT:    sub r4, r7, r4
+; CHECK-PWR9-BE-NEXT:    sub r10, r6, r3
+; CHECK-PWR9-BE-NEXT:    cmplw r6, r3
+; CHECK-PWR9-BE-NEXT:    sub r6, r4, r7
+; CHECK-PWR9-BE-NEXT:    sub r11, r7, r4
 ; CHECK-PWR9-BE-NEXT:    clrlwi r8, r8, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r5, r5, 24
-; CHECK-PWR9-BE-NEXT:    sub r5, r8, r5
-; CHECK-PWR9-BE-NEXT:    srawi r6, r3, 31
-; CHECK-PWR9-BE-NEXT:    srawi r7, r4, 31
-; CHECK-PWR9-BE-NEXT:    srawi r8, r5, 31
-; CHECK-PWR9-BE-NEXT:    xor r3, r3, r6
-; CHECK-PWR9-BE-NEXT:    xor r4, r4, r7
-; CHECK-PWR9-BE-NEXT:    xor r5, r5, r8
-; CHECK-PWR9-BE-NEXT:    sub r3, r3, r6
+; CHECK-PWR9-BE-NEXT:    iselgt r3, r10, r9
+; CHECK-PWR9-BE-NEXT:    cmplw r7, r4
+; CHECK-PWR9-BE-NEXT:    sub r7, r5, r8
+; CHECK-PWR9-BE-NEXT:    sub r9, r8, r5
+; CHECK-PWR9-BE-NEXT:    iselgt r4, r11, r6
+; CHECK-PWR9-BE-NEXT:    cmplw r8, r5
 ; CHECK-PWR9-BE-NEXT:    li r6, 3
-; CHECK-PWR9-BE-NEXT:    sub r4, r4, r7
-; CHECK-PWR9-BE-NEXT:    sub r5, r5, r8
+; CHECK-PWR9-BE-NEXT:    iselgt r5, r9, r7
 ; CHECK-PWR9-BE-NEXT:    vextublx r7, r6, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r6, r6, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r7, r7, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r6, r6, 24
-; CHECK-PWR9-BE-NEXT:    sub r6, r7, r6
-; CHECK-PWR9-BE-NEXT:    srawi r7, r6, 31
-; CHECK-PWR9-BE-NEXT:    xor r6, r6, r7
-; CHECK-PWR9-BE-NEXT:    sub r6, r6, r7
+; CHECK-PWR9-BE-NEXT:    sub r8, r6, r7
+; CHECK-PWR9-BE-NEXT:    sub r9, r7, r6
+; CHECK-PWR9-BE-NEXT:    cmplw r7, r6
 ; CHECK-PWR9-BE-NEXT:    li r7, 4
+; CHECK-PWR9-BE-NEXT:    iselgt r6, r9, r8
 ; CHECK-PWR9-BE-NEXT:    vextublx r8, r7, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r7, r7, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r8, r8, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r7, r7, 24
-; CHECK-PWR9-BE-NEXT:    sub r7, r8, r7
-; CHECK-PWR9-BE-NEXT:    srawi r8, r7, 31
-; CHECK-PWR9-BE-NEXT:    xor r7, r7, r8
-; CHECK-PWR9-BE-NEXT:    sub r7, r7, r8
+; CHECK-PWR9-BE-NEXT:    sub r9, r7, r8
+; CHECK-PWR9-BE-NEXT:    sub r10, r8, r7
+; CHECK-PWR9-BE-NEXT:    cmplw r8, r7
 ; CHECK-PWR9-BE-NEXT:    li r8, 5
+; CHECK-PWR9-BE-NEXT:    iselgt r7, r10, r9
 ; CHECK-PWR9-BE-NEXT:    vextublx r9, r8, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r8, r8, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r9, r9, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r8, r8, 24
-; CHECK-PWR9-BE-NEXT:    sub r8, r9, r8
-; CHECK-PWR9-BE-NEXT:    srawi r9, r8, 31
-; CHECK-PWR9-BE-NEXT:    xor r8, r8, r9
-; CHECK-PWR9-BE-NEXT:    sub r8, r8, r9
+; CHECK-PWR9-BE-NEXT:    sub r10, r8, r9
+; CHECK-PWR9-BE-NEXT:    sub r11, r9, r8
+; CHECK-PWR9-BE-NEXT:    cmplw r9, r8
 ; CHECK-PWR9-BE-NEXT:    li r9, 6
+; CHECK-PWR9-BE-NEXT:    iselgt r8, r11, r10
 ; CHECK-PWR9-BE-NEXT:    vextublx r10, r9, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r9, r9, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r10, r10, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r9, r9, 24
-; CHECK-PWR9-BE-NEXT:    sub r9, r10, r9
-; CHECK-PWR9-BE-NEXT:    srawi r10, r9, 31
-; CHECK-PWR9-BE-NEXT:    xor r9, r9, r10
-; CHECK-PWR9-BE-NEXT:    sub r9, r9, r10
+; CHECK-PWR9-BE-NEXT:    sub r11, r9, r10
+; CHECK-PWR9-BE-NEXT:    sub r12, r10, r9
+; CHECK-PWR9-BE-NEXT:    cmplw r10, r9
 ; CHECK-PWR9-BE-NEXT:    li r10, 7
+; CHECK-PWR9-BE-NEXT:    iselgt r9, r12, r11
 ; CHECK-PWR9-BE-NEXT:    vextublx r11, r10, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r10, r10, v3
 ; CHECK-PWR9-BE-NEXT:    mtfprwz f2, r9
 ; CHECK-PWR9-BE-NEXT:    clrlwi r11, r11, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r10, r10, 24
-; CHECK-PWR9-BE-NEXT:    sub r10, r11, r10
-; CHECK-PWR9-BE-NEXT:    srawi r11, r10, 31
-; CHECK-PWR9-BE-NEXT:    xor r10, r10, r11
-; CHECK-PWR9-BE-NEXT:    sub r10, r10, r11
+; CHECK-PWR9-BE-NEXT:    sub r12, r10, r11
+; CHECK-PWR9-BE-NEXT:    sub r30, r11, r10
+; CHECK-PWR9-BE-NEXT:    cmplw r11, r10
 ; CHECK-PWR9-BE-NEXT:    li r11, 8
+; CHECK-PWR9-BE-NEXT:    iselgt r10, r30, r12
 ; CHECK-PWR9-BE-NEXT:    vextublx r12, r11, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r11, r11, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r12, r12, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r11, r11, 24
-; CHECK-PWR9-BE-NEXT:    sub r11, r12, r11
-; CHECK-PWR9-BE-NEXT:    srawi r12, r11, 31
-; CHECK-PWR9-BE-NEXT:    xor r11, r11, r12
-; CHECK-PWR9-BE-NEXT:    sub r11, r11, r12
+; CHECK-PWR9-BE-NEXT:    sub r30, r11, r12
+; CHECK-PWR9-BE-NEXT:    sub r29, r12, r11
+; CHECK-PWR9-BE-NEXT:    cmplw r12, r11
 ; CHECK-PWR9-BE-NEXT:    li r12, 9
 ; CHECK-PWR9-BE-NEXT:    vextublx r0, r12, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r12, r12, v3
+; CHECK-PWR9-BE-NEXT:    iselgt r11, r29, r30
 ; CHECK-PWR9-BE-NEXT:    clrlwi r0, r0, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r12, r12, 24
-; CHECK-PWR9-BE-NEXT:    sub r12, r0, r12
-; CHECK-PWR9-BE-NEXT:    srawi r0, r12, 31
-; CHECK-PWR9-BE-NEXT:    xor r12, r12, r0
-; CHECK-PWR9-BE-NEXT:    sub r12, r12, r0
+; CHECK-PWR9-BE-NEXT:    sub r30, r12, r0
+; CHECK-PWR9-BE-NEXT:    sub r29, r0, r12
+; CHECK-PWR9-BE-NEXT:    cmplw r0, r12
 ; CHECK-PWR9-BE-NEXT:    li r0, 10
+; CHECK-PWR9-BE-NEXT:    iselgt r12, r29, r30
 ; CHECK-PWR9-BE-NEXT:    vextublx r30, r0, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r0, r0, v3
 ; CHECK-PWR9-BE-NEXT:    mtvsrwz v4, r12
 ; CHECK-PWR9-BE-NEXT:    clrlwi r30, r30, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r0, r0, 24
-; CHECK-PWR9-BE-NEXT:    sub r0, r30, r0
-; CHECK-PWR9-BE-NEXT:    srawi r30, r0, 31
-; CHECK-PWR9-BE-NEXT:    xor r0, r0, r30
-; CHECK-PWR9-BE-NEXT:    sub r0, r0, r30
+; CHECK-PWR9-BE-NEXT:    sub r29, r0, r30
+; CHECK-PWR9-BE-NEXT:    sub r28, r30, r0
+; CHECK-PWR9-BE-NEXT:    cmplw r30, r0
 ; CHECK-PWR9-BE-NEXT:    li r30, 11
+; CHECK-PWR9-BE-NEXT:    iselgt r0, r28, r29
 ; CHECK-PWR9-BE-NEXT:    vextublx r29, r30, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r30, r30, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r29, r29, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r30, r30, 24
-; CHECK-PWR9-BE-NEXT:    sub r30, r29, r30
-; CHECK-PWR9-BE-NEXT:    srawi r29, r30, 31
-; CHECK-PWR9-BE-NEXT:    xor r30, r30, r29
-; CHECK-PWR9-BE-NEXT:    sub r30, r30, r29
+; CHECK-PWR9-BE-NEXT:    sub r28, r30, r29
+; CHECK-PWR9-BE-NEXT:    sub r27, r29, r30
+; CHECK-PWR9-BE-NEXT:    cmplw r29, r30
 ; CHECK-PWR9-BE-NEXT:    li r29, 12
+; CHECK-PWR9-BE-NEXT:    iselgt r30, r27, r28
 ; CHECK-PWR9-BE-NEXT:    vextublx r28, r29, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r29, r29, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r28, r28, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r29, r29, 24
-; CHECK-PWR9-BE-NEXT:    sub r29, r28, r29
-; CHECK-PWR9-BE-NEXT:    srawi r28, r29, 31
-; CHECK-PWR9-BE-NEXT:    xor r29, r29, r28
-; CHECK-PWR9-BE-NEXT:    sub r29, r29, r28
+; CHECK-PWR9-BE-NEXT:    sub r27, r29, r28
+; CHECK-PWR9-BE-NEXT:    sub r26, r28, r29
+; CHECK-PWR9-BE-NEXT:    cmplw r28, r29
 ; CHECK-PWR9-BE-NEXT:    li r28, 13
+; CHECK-PWR9-BE-NEXT:    iselgt r29, r26, r27
 ; CHECK-PWR9-BE-NEXT:    vextublx r27, r28, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r28, r28, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r27, r27, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r28, r28, 24
-; CHECK-PWR9-BE-NEXT:    sub r28, r27, r28
-; CHECK-PWR9-BE-NEXT:    srawi r27, r28, 31
-; CHECK-PWR9-BE-NEXT:    xor r28, r28, r27
-; CHECK-PWR9-BE-NEXT:    sub r28, r28, r27
+; CHECK-PWR9-BE-NEXT:    sub r26, r28, r27
+; CHECK-PWR9-BE-NEXT:    sub r25, r27, r28
+; CHECK-PWR9-BE-NEXT:    cmplw r27, r28
 ; CHECK-PWR9-BE-NEXT:    li r27, 14
+; CHECK-PWR9-BE-NEXT:    iselgt r28, r25, r26
 ; CHECK-PWR9-BE-NEXT:    vextublx r26, r27, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r27, r27, v3
 ; CHECK-PWR9-BE-NEXT:    clrlwi r26, r26, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r27, r27, 24
-; CHECK-PWR9-BE-NEXT:    sub r27, r26, r27
-; CHECK-PWR9-BE-NEXT:    srawi r26, r27, 31
-; CHECK-PWR9-BE-NEXT:    xor r27, r27, r26
-; CHECK-PWR9-BE-NEXT:    sub r27, r27, r26
+; CHECK-PWR9-BE-NEXT:    sub r25, r27, r26
+; CHECK-PWR9-BE-NEXT:    sub r24, r26, r27
+; CHECK-PWR9-BE-NEXT:    cmplw r26, r27
 ; CHECK-PWR9-BE-NEXT:    li r26, 15
+; CHECK-PWR9-BE-NEXT:    iselgt r27, r24, r25
 ; CHECK-PWR9-BE-NEXT:    vextublx r25, r26, v2
 ; CHECK-PWR9-BE-NEXT:    vextublx r26, r26, v3
-; CHECK-PWR9-BE-NEXT:    mtfprwz f0, r27
-; CHECK-PWR9-BE-NEXT:    addis r27, r2, .LCPI9_0@toc@ha
 ; CHECK-PWR9-BE-NEXT:    mtvsrwz v3, r28
 ; CHECK-PWR9-BE-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-PWR9-BE-NEXT:    addi r27, r27, .LCPI9_0@toc@l
+; CHECK-PWR9-BE-NEXT:    mtfprwz f0, r27
+; CHECK-PWR9-BE-NEXT:    addis r27, r2, .LCPI9_0@toc@ha
 ; CHECK-PWR9-BE-NEXT:    clrlwi r25, r25, 24
 ; CHECK-PWR9-BE-NEXT:    clrlwi r26, r26, 24
+; CHECK-PWR9-BE-NEXT:    addi r27, r27, .LCPI9_0@toc@l
+; CHECK-PWR9-BE-NEXT:    sub r24, r26, r25
+; CHECK-PWR9-BE-NEXT:    sub r23, r25, r26
+; CHECK-PWR9-BE-NEXT:    cmplw r25, r26
+; CHECK-PWR9-BE-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
 ; CHECK-PWR9-BE-NEXT:    lxv vs1, 0(r27)
 ; CHECK-PWR9-BE-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
-; CHECK-PWR9-BE-NEXT:    sub r26, r25, r26
-; CHECK-PWR9-BE-NEXT:    srawi r25, r26, 31
-; CHECK-PWR9-BE-NEXT:    xor r26, r26, r25
-; CHECK-PWR9-BE-NEXT:    sub r26, r26, r25
-; CHECK-PWR9-BE-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
+; CHECK-PWR9-BE-NEXT:    iselgt r26, r23, r24
+; CHECK-PWR9-BE-NEXT:    ld r24, -64(r1) # 8-byte Folded Reload
+; CHECK-PWR9-BE-NEXT:    ld r23, -72(r1) # 8-byte Folded Reload
 ; CHECK-PWR9-BE-NEXT:    mtvsrwz v2, r26
 ; CHECK-PWR9-BE-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
 ; CHECK-PWR9-BE-NEXT:    xxperm v2, vs0, vs1
@@ -650,179 +601,183 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
 ;
 ; CHECK-PWR8-LABEL: sub_absv_8_ext:
 ; CHECK-PWR8:       # %bb.0: # %entry
+; CHECK-PWR8-NEXT:    mfvsrd r6, v2
+; CHECK-PWR8-NEXT:    mfvsrd r9, v3
+; CHECK-PWR8-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
 ; CHECK-PWR8-NEXT:    xxswapd vs0, v2
 ; CHECK-PWR8-NEXT:    xxswapd vs1, v3
-; CHECK-PWR8-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT:    std r22, -80(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT:    std r24, -64(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT:    mffprd r3, f0
+; CHECK-PWR8-NEXT:    mffprd r4, f1
+; CHECK-PWR8-NEXT:    std r23, -72(r1) # 8-byte Folded Spill
 ; CHECK-PWR8-NEXT:    std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT:    std r26, -48(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT:    mffprd r11, f0
-; CHECK-PWR8-NEXT:    mffprd r8, f1
 ; CHECK-PWR8-NEXT:    std r27, -40(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT:    std r28, -32(r1) # 8-byte Folded Spill
+; CHECK-PWR8-NEXT:    clrldi r5, r6, 56
+; CHECK-PWR8-NEXT:    clrldi r7, r9, 56
+; CHECK-PWR8-NEXT:    rldicl r8, r6, 56, 56
 ; CHECK-PWR8-NEXT:    std r25, -56(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT:    clrldi r3, r11, 56
-; CHECK-PWR8-NEXT:    clrldi r4, r8, 56
-; CHECK-PWR8-NEXT:    rldicl r5, r11, 56, 56
-; CHECK-PWR8-NEXT:    rldicl r6, r8, 56, 56
-; CHECK-PWR8-NEXT:    rldicl r7, r11, 48, 56
-; CHECK-PWR8-NEXT:    rldicl r9, r8, 48, 56
-; CHECK-PWR8-NEXT:    rldicl r0, r11, 32, 56
-; CHECK-PWR8-NEXT:    rldicl r30, r8, 32, 56
-; CHECK-PWR8-NEXT:    rldicl r29, r11, 24, 56
-; CHECK-PWR8-NEXT:    rldicl r28, r8, 24, 56
-; CHECK-PWR8-NEXT:    rldicl r10, r11, 40, 56
-; CHECK-PWR8-NEXT:    rldicl r12, r8, 40, 56
-; CHECK-PWR8-NEXT:    rldicl r27, r11, 16, 56
-; CHECK-PWR8-NEXT:    rldicl r11, r11, 8, 56
-; CHECK-PWR8-NEXT:    std r24, -64(r1) # 8-byte Folded Spill
-; CHECK-PWR8-NEXT:    clrlwi r3, r3, 24
-; CHECK-PWR8-NEXT:    clrlwi r4, r4, 24
 ; CHECK-PWR8-NEXT:    clrlwi r5, r5, 24
-; CHECK-PWR8-NEXT:    clrlwi r6, r6, 24
 ; CHECK-PWR8-NEXT:    clrlwi r7, r7, 24
-; CHECK-PWR8-NEXT:    clrlwi r9, r9, 24
-; CHECK-PWR8-NEXT:    sub r3, r3, r4
-; CHECK-PWR8-NEXT:    clrlwi r0, r0, 24
-; CHECK-PWR8-NEXT:    clrlwi r30, r30, 24
-; CHECK-PWR8-NEXT:    sub r4, r5, r6
-; CHECK-PWR8-NEXT:    sub r5, r7, r9
+; CHECK-PWR8-NEXT:    clrldi r10, r3, 56
+; CHECK-PWR8-NEXT:    clrldi r11, r4, 56
+; CHECK-PWR8-NEXT:    rldicl r12, r3, 56, 56
+; CHECK-PWR8-NEXT:    rldicl r30, r3, 48, 56
+; CHECK-PWR8-NEXT:    rldicl r26, r3, 32, 56
+; CHECK-PWR8-NEXT:    clrlwi r24, r8, 24
+; CHECK-PWR8-NEXT:    rldicl r0, r4, 56, 56
+; CHECK-PWR8-NEXT:    rldicl r29, r4, 48, 56
+; CHECK-PWR8-NEXT:    rldicl r28, r3, 40, 56
+; CHECK-PWR8-NEXT:    rldicl r27, r4, 40, 56
+; CHECK-PWR8-NEXT:    rldicl r25, r4, 32, 56
+; CHECK-PWR8-NEXT:    cmplw r5, r7
+; CHECK-PWR8-NEXT:    clrlwi r8, r10, 24
+; CHECK-PWR8-NEXT:    clrlwi r10, r11, 24
+; CHECK-PWR8-NEXT:    clrlwi r11, r12, 24
+; CHECK-PWR8-NEXT:    clrlwi r22, r30, 24
+; CHECK-PWR8-NEXT:    sub r30, r7, r5
+; CHECK-PWR8-NEXT:    clrlwi r12, r26, 24
+; CHECK-PWR8-NEXT:    sub r26, r5, r7
+; CHECK-PWR8-NEXT:    sub r5, r10, r8
+; CHECK-PWR8-NEXT:    clrlwi r23, r0, 24
+; CHECK-PWR8-NEXT:    sub r7, r8, r10
 ; CHECK-PWR8-NEXT:    clrlwi r29, r29, 24
 ; CHECK-PWR8-NEXT:    clrlwi r28, r28, 24
-; CHECK-PWR8-NEXT:    sub r7, r0, r30
-; CHECK-PWR8-NEXT:    sub r9, r29, r28
-; CHECK-PWR8-NEXT:    clrlwi r10, r10, 24
-; CHECK-PWR8-NEXT:    clrlwi r12, r12, 24
-; CHECK-PWR8-NEXT:    sub r6, r10, r12
 ; CHECK-PWR8-NEXT:    clrlwi r27, r27, 24
-; CHECK-PWR8-NEXT:    clrlwi r11, r11, 24
-; CHECK-PWR8-NEXT:    srawi r0, r5, 31
-; CHECK-PWR8-NEXT:    srawi r29, r7, 31
-; CHECK-PWR8-NEXT:    srawi r12, r4, 31
-; CHECK-PWR8-NEXT:    srawi r28, r9, 31
-; CHECK-PWR8-NEXT:    srawi r30, r6, 31
-; CHECK-PWR8-NEXT:    srawi r10, r3, 31
-; CHECK-PWR8-NEXT:    xor r5, r5, r0
-; CHECK-PWR8-NEXT:    xor r26, r7, r29
-; CHECK-PWR8-NEXT:    sub r7, r5, r0
-; CHECK-PWR8-NEXT:    rldicl r5, r8, 16, 56
-; CHECK-PWR8-NEXT:    rldicl r8, r8, 8, 56
-; CHECK-PWR8-NEXT:    xor r4, r4, r12
-; CHECK-PWR8-NEXT:    xor r25, r9, r28
-; CHECK-PWR8-NEXT:    sub r9, r4, r12
-; CHECK-PWR8-NEXT:    sub r4, r26, r29
-; CHECK-PWR8-NEXT:    mtvsrd v1, r9
-; CHECK-PWR8-NEXT:    clrlwi r5, r5, 24
-; CHECK-PWR8-NEXT:    sub r5, r27, r5
-; CHECK-PWR8-NEXT:    clrlwi r8, r8, 24
-; CHECK-PWR8-NEXT:    sub r8, r11, r8
-; CHECK-PWR8-NEXT:    xor r6, r6, r30
-; CHECK-PWR8-NEXT:    sub r6, r6, r30
-; CHECK-PWR8-NEXT:    xor r3, r3, r10
-; CHECK-PWR8-NEXT:    sub r10, r3, r10
-; CHECK-PWR8-NEXT:    sub r3, r25, r28
-; CHECK-PWR8-NEXT:    mtvsrd v6, r6
-; CHECK-PWR8-NEXT:    mtvsrd v7, r3
-; CHECK-PWR8-NEXT:    srawi r12, r5, 31
-; CHECK-PWR8-NEXT:    srawi r11, r8, 31
-; CHECK-PWR8-NEXT:    xor r5, r5, r12
-; CHECK-PWR8-NEXT:    xor r8, r8, r11
-; CHECK-PWR8-NEXT:    sub r5, r5, r12
-; CHECK-PWR8-NEXT:    sub r8, r8, r11
-; CHECK-PWR8-NEXT:    mfvsrd r11, v2
-; CHECK-PWR8-NEXT:    mfvsrd r12, v3
-; CHECK-PWR8-NEXT:    mtvsrd v8, r8
-; CHECK-PWR8-NEXT:    clrldi r0, r11, 56
-; CHECK-PWR8-NEXT:    clrldi r30, r12, 56
-; CHECK-PWR8-NEXT:    rldicl r29, r12, 56, 56
-; CHECK-PWR8-NEXT:    rldicl r28, r12, 48, 56
-; CHECK-PWR8-NEXT:    rldicl r27, r12, 40, 56
-; CHECK-PWR8-NEXT:    rldicl r26, r12, 32, 56
-; CHECK-PWR8-NEXT:    rldicl r25, r12, 24, 56
-; CHECK-PWR8-NEXT:    rldicl r24, r12, 16, 56
-; CHECK-PWR8-NEXT:    rldicl r12, r12, 8, 56
-; CHECK-PWR8-NEXT:    clrlwi r0, r0, 24
-; CHECK-PWR8-NEXT:    clrlwi r30, r30, 24
+; CHECK-PWR8-NEXT:    clrlwi r0, r25, 24
+; CHECK-PWR8-NEXT:    iselgt r30, r26, r30
+; CHECK-PWR8-NEXT:    cmplw r8, r10
+; CHECK-PWR8-NEXT:    sub r8, r23, r11
+; CHECK-PWR8-NEXT:    sub r26, r11, r23
+; CHECK-PWR8-NEXT:    iselgt r10, r7, r5
+; CHECK-PWR8-NEXT:    cmplw r11, r23
+; CHECK-PWR8-NEXT:    sub r5, r29, r22
+; CHECK-PWR8-NEXT:    sub r7, r22, r29
+; CHECK-PWR8-NEXT:    mtvsrd v2, r30
+; CHECK-PWR8-NEXT:    iselgt r11, r26, r8
+; CHECK-PWR8-NEXT:    cmplw r22, r29
+; CHECK-PWR8-NEXT:    sub r8, r27, r28
+; CHECK-PWR8-NEXT:    sub r29, r28, r27
+; CHECK-PWR8-NEXT:    iselgt r7, r7, r5
+; CHECK-PWR8-NEXT:    cmplw r28, r27
+; CHECK-PWR8-NEXT:    mtvsrd v1, r11
+; CHECK-PWR8-NEXT:    sub r5, r0, r12
+; CHECK-PWR8-NEXT:    iselgt r8, r29, r8
+; CHECK-PWR8-NEXT:    rldicl r29, r9, 56, 56
 ; CHECK-PWR8-NEXT:    clrlwi r29, r29, 24
-; CHECK-PWR8-NEXT:    clrlwi r28, r28, 24
+; CHECK-PWR8-NEXT:    cmplw r24, r29
+; CHECK-PWR8-NEXT:    sub r28, r29, r24
+; CHECK-PWR8-NEXT:    sub r27, r24, r29
+; CHECK-PWR8-NEXT:    rldicl r29, r6, 48, 56
+; CHECK-PWR8-NEXT:    mtvsrd v6, r8
+; CHECK-PWR8-NEXT:    clrlwi r29, r29, 24
+; CHECK-PWR8-NEXT:    iselgt r28, r27, r28
+; CHECK-PWR8-NEXT:    rldicl r27, r9, 48, 56
 ; CHECK-PWR8-NEXT:    clrlwi r27, r27, 24
+; CHECK-PWR8-NEXT:    cmplw r29, r27
+; CHECK-PWR8-NEXT:    sub r26, r27, r29
+; CHECK-PWR8-NEXT:    sub r25, r29, r27
+; CHECK-PWR8-NEXT:    rldicl r29, r6, 40, 56
+; CHECK-PWR8-NEXT:    mtvsrd v3, r28
+; CHECK-PWR8-NEXT:    clrlwi r29, r29, 24
+; CHECK-PWR8-NEXT:    iselgt r27, r25, r26
+; CHECK-PWR8-NEXT:    rldicl r26, r9, 40, 56
 ; CHECK-PWR8-NEXT:    clrlwi r26, r26, 24
-; CHECK-PWR8-NEXT:    clrlwi r25, r25, 24
-; CHECK-PWR8-NEXT:    clrlwi r24, r24, 24
-; CHECK-PWR8-NEXT:    clrlwi r12, r12, 24
-; CHECK-PWR8-NEXT:    sub r0, r0, r30
-; CHECK-PWR8-NEXT:    srawi r30, r0, 31
-; CHECK-PWR8-NEXT:    xor r0, r0, r30
-; CHECK-PWR8-NEXT:    sub r0, r0, r30
-; CHECK-PWR8-NEXT:    rldicl r30, r11, 56, 56
-; CHECK-PWR8-NEXT:    clrlwi r30, r30, 24
-; CHECK-PWR8-NEXT:    mtvsrd v2, r0
-; CHECK-PWR8-NEXT:    sub r30, r30, r29
-; CHECK-PWR8-NEXT:    srawi r29, r30, 31
-; CHECK-PWR8-NEXT:    xor r30, r30, r29
-; CHECK-PWR8-NEXT:    sub r30, r30, r29
-; CHECK-PWR8-NEXT:    rldicl r29, r11, 48, 56
+; CHECK-PWR8-NEXT:    cmplw r29, r26
+; CHECK-PWR8-NEXT:    sub r25, r26, r29
+; CHECK-PWR8-NEXT:    sub r24, r29, r26
+; CHECK-PWR8-NEXT:    rldicl r29, r6, 32, 56
+; CHECK-PWR8-NEXT:    rldicl r26, r9, 32, 56
+; CHECK-PWR8-NEXT:    clrlwi r29, r29, 24
+; CHECK-PWR8-NEXT:    clrlwi r26, r26, 24
+; CHECK-PWR8-NEXT:    sub r23, r26, r29
+; CHECK-PWR8-NEXT:    sub r22, r29, r26
+; CHECK-PWR8-NEXT:    iselgt r25, r24, r25
+; CHECK-PWR8-NEXT:    cmplw r29, r26
+; CHECK-PWR8-NEXT:    rldicl r29, r6, 24, 56
+; CHECK-PWR8-NEXT:    rldicl r26, r9, 24, 56
 ; CHECK-PWR8-NEXT:    clrlwi r29, r29, 24
-; CHECK-PWR8-NEXT:    mtvsrd v3, r30
-; CHECK-PWR8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT:    sub r29, r29, r28
-; CHECK-PWR8-NEXT:    srawi r28, r29, 31
-; CHECK-PWR8-NEXT:    xor r29, r29, r28
-; CHECK-PWR8-NEXT:    sub r29, r29, r28
-; CHECK-PWR8-NEXT:    rldicl r28, r11, 40, 56
-; CHECK-PWR8-NEXT:    clrlwi r28, r28, 24
-; CHECK-PWR8-NEXT:    sub r28, r28, r27
-; CHECK-PWR8-NEXT:    srawi r27, r28, 31
-; CHECK-PWR8-NEXT:    xor r28, r28, r27
-; CHECK-PWR8-NEXT:    sub r28, r28, r27
-; CHECK-PWR8-NEXT:    rldicl r27, r11, 32, 56
-; CHECK-PWR8-NEXT:    clrlwi r27, r27, 24
-; CHECK-PWR8-NEXT:    mtvsrd v4, r28
-; CHECK-PWR8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT:    sub r27, r27, r26
-; CHECK-PWR8-NEXT:    srawi r26, r27, 31
-; CHECK-PWR8-NEXT:    xor r27, r27, r26
-; CHECK-PWR8-NEXT:    sub r27, r27, r26
-; CHECK-PWR8-NEXT:    rldicl r26, r11, 24, 56
 ; CHECK-PWR8-NEXT:    clrlwi r26, r26, 24
-; CHECK-PWR8-NEXT:    sub r26, r26, r25
-; CHECK-PWR8-NEXT:    srawi r25, r26, 31
-; CHECK-PWR8-NEXT:    xor r26, r26, r25
-; CHECK-PWR8-NEXT:    sub r26, r26, r25
-; CHECK-PWR8-NEXT:    rldicl r25, r11, 16, 56
-; CHECK-PWR8-NEXT:    rldicl r11, r11, 8, 56
+; CHECK-PWR8-NEXT:    sub r24, r26, r29
+; CHECK-PWR8-NEXT:    iselgt r23, r22, r23
+; CHECK-PWR8-NEXT:    cmplw r29, r26
+; CHECK-PWR8-NEXT:    sub r22, r29, r26
+; CHECK-PWR8-NEXT:    rldicl r29, r6, 16, 56
+; CHECK-PWR8-NEXT:    rldicl r6, r6, 8, 56
+; CHECK-PWR8-NEXT:    mtvsrd v4, r25
+; CHECK-PWR8-NEXT:    rldicl r25, r3, 24, 56
+; CHECK-PWR8-NEXT:    clrlwi r29, r29, 24
+; CHECK-PWR8-NEXT:    clrlwi r6, r6, 24
 ; CHECK-PWR8-NEXT:    clrlwi r25, r25, 24
-; CHECK-PWR8-NEXT:    clrlwi r11, r11, 24
+; CHECK-PWR8-NEXT:    iselgt r26, r22, r24
+; CHECK-PWR8-NEXT:    rldicl r24, r9, 16, 56
+; CHECK-PWR8-NEXT:    rldicl r9, r9, 8, 56
+; CHECK-PWR8-NEXT:    clrlwi r24, r24, 24
+; CHECK-PWR8-NEXT:    clrlwi r9, r9, 24
+; CHECK-PWR8-NEXT:    cmplw r29, r24
+; CHECK-PWR8-NEXT:    sub r22, r24, r29
+; CHECK-PWR8-NEXT:    sub r24, r29, r24
+; CHECK-PWR8-NEXT:    sub r30, r9, r6
+; CHECK-PWR8-NEXT:    sub r28, r6, r9
+; CHECK-PWR8-NEXT:    sub r29, r12, r0
 ; CHECK-PWR8-NEXT:    mtvsrd v5, r26
-; CHECK-PWR8-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT:    sub r25, r25, r24
-; CHECK-PWR8-NEXT:    sub r11, r11, r12
-; CHECK-PWR8-NEXT:    srawi r24, r25, 31
-; CHECK-PWR8-NEXT:    srawi r12, r11, 31
-; CHECK-PWR8-NEXT:    xor r25, r25, r24
-; CHECK-PWR8-NEXT:    xor r11, r11, r12
-; CHECK-PWR8-NEXT:    sub r25, r25, r24
-; CHECK-PWR8-NEXT:    sub r11, r11, r12
-; CHECK-PWR8-NEXT:    ld r24, -64(r1) # 8-byte Folded Reload
-; CHECK-PWR8-NEXT:    mtvsrd v0, r11
 ; CHECK-PWR8-NEXT:    vmrghb v2, v3, v2
-; CHECK-PWR8-NEXT:    mtvsrd v3, r29
+; CHECK-PWR8-NEXT:    mtvsrd v3, r27
+; CHECK-PWR8-NEXT:    iselgt r27, r24, r22
+; CHECK-PWR8-NEXT:    cmplw r6, r9
+; CHECK-PWR8-NEXT:    rldicl r6, r4, 24, 56
+; CHECK-PWR8-NEXT:    ld r24, -64(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT:    ld r22, -80(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT:    clrlwi r6, r6, 24
+; CHECK-PWR8-NEXT:    sub r9, r6, r25
+; CHECK-PWR8-NEXT:    sub r26, r25, r6
+; CHECK-PWR8-NEXT:    iselgt r30, r28, r30
+; CHECK-PWR8-NEXT:    cmplw r12, r0
+; CHECK-PWR8-NEXT:    rldicl r12, r3, 16, 56
+; CHECK-PWR8-NEXT:    rldicl r0, r4, 16, 56
+; CHECK-PWR8-NEXT:    rldicl r3, r3, 8, 56
+; CHECK-PWR8-NEXT:    rldicl r4, r4, 8, 56
+; CHECK-PWR8-NEXT:    clrlwi r12, r12, 24
+; CHECK-PWR8-NEXT:    clrlwi r0, r0, 24
+; CHECK-PWR8-NEXT:    clrlwi r3, r3, 24
+; CHECK-PWR8-NEXT:    clrlwi r4, r4, 24
+; CHECK-PWR8-NEXT:    sub r28, r0, r12
+; CHECK-PWR8-NEXT:    mtvsrd v0, r30
+; CHECK-PWR8-NEXT:    iselgt r5, r29, r5
+; CHECK-PWR8-NEXT:    cmplw r25, r6
+; CHECK-PWR8-NEXT:    sub r30, r12, r0
 ; CHECK-PWR8-NEXT:    ld r29, -24(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
 ; CHECK-PWR8-NEXT:    vmrghb v3, v4, v3
-; CHECK-PWR8-NEXT:    mtvsrd v4, r27
-; CHECK-PWR8-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT:    mtvsrd v4, r23
+; CHECK-PWR8-NEXT:    ld r23, -72(r1) # 8-byte Folded Reload
 ; CHECK-PWR8-NEXT:    vmrglh v2, v3, v2
 ; CHECK-PWR8-NEXT:    vmrghb v4, v5, v4
-; CHECK-PWR8-NEXT:    mtvsrd v5, r25
-; CHECK-PWR8-NEXT:    ld r25, -56(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT:    mtvsrd v5, r27
+; CHECK-PWR8-NEXT:    ld r27, -40(r1) # 8-byte Folded Reload
 ; CHECK-PWR8-NEXT:    vmrghb v5, v0, v5
 ; CHECK-PWR8-NEXT:    mtvsrd v0, r10
+; CHECK-PWR8-NEXT:    sub r10, r4, r3
 ; CHECK-PWR8-NEXT:    vmrglh v3, v5, v4
 ; CHECK-PWR8-NEXT:    xxmrglw vs0, v3, v2
 ; CHECK-PWR8-NEXT:    vmrghb v0, v1, v0
 ; CHECK-PWR8-NEXT:    mtvsrd v1, r7
+; CHECK-PWR8-NEXT:    sub r7, r3, r4
 ; CHECK-PWR8-NEXT:    vmrghb v1, v6, v1
-; CHECK-PWR8-NEXT:    mtvsrd v6, r4
+; CHECK-PWR8-NEXT:    mtvsrd v6, r5
+; CHECK-PWR8-NEXT:    iselgt r5, r26, r9
+; CHECK-PWR8-NEXT:    cmplw r12, r0
+; CHECK-PWR8-NEXT:    ld r26, -48(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT:    mtvsrd v7, r5
+; CHECK-PWR8-NEXT:    iselgt r5, r30, r28
+; CHECK-PWR8-NEXT:    cmplw r3, r4
 ; CHECK-PWR8-NEXT:    vmrglh v4, v1, v0
+; CHECK-PWR8-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT:    ld r28, -32(r1) # 8-byte Folded Reload
+; CHECK-PWR8-NEXT:    iselgt r3, r7, r10
+; CHECK-PWR8-NEXT:    mtvsrd v8, r3
 ; CHECK-PWR8-NEXT:    vmrghb v6, v7, v6
 ; CHECK-PWR8-NEXT:    mtvsrd v7, r5
 ; CHECK-PWR8-NEXT:    vmrghb v7, v8, v7
@@ -833,8 +788,9 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
 ;
 ; CHECK-PWR7-LABEL: sub_absv_8_ext:
 ; CHECK-PWR7:       # %bb.0: # %entry
-; CHECK-PWR7-NEXT:    stdu r1, -400(r1)
-; CHECK-PWR7-NEXT:    .cfi_def_cfa_offset 400
+; CHECK-PWR7-NEXT:    stdu r1, -416(r1)
+; CHECK-PWR7-NEXT:    .cfi_def_cfa_offset 416
+; CHECK-PWR7-NEXT:    .cfi_offset r23, -72
 ; CHECK-PWR7-NEXT:    .cfi_offset r24, -64
 ; CHECK-PWR7-NEXT:    .cfi_offset r25, -56
 ; CHECK-PWR7-NEXT:    .cfi_offset r26, -48
@@ -843,136 +799,138 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
 ; CHECK-PWR7-NEXT:    .cfi_offset r29, -24
 ; CHECK-PWR7-NEXT:    .cfi_offset r30, -16
 ; CHECK-PWR7-NEXT:    addi r3, r1, 304
-; CHECK-PWR7-NEXT:    std r24, 336(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r25, 344(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r26, 352(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r27, 360(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r28, 368(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r29, 376(r1) # 8-byte Folded Spill
-; CHECK-PWR7-NEXT:    std r30, 384(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    addi r4, r1, 320
+; CHECK-PWR7-NEXT:    std r23, 344(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r24, 352(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r25, 360(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r26, 368(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r27, 376(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r28, 384(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r29, 392(r1) # 8-byte Folded Spill
+; CHECK-PWR7-NEXT:    std r30, 400(r1) # 8-byte Folded Spill
 ; CHECK-PWR7-NEXT:    stxvw4x v2, 0, r3
-; CHECK-PWR7-NEXT:    addi r3, r1, 320
-; CHECK-PWR7-NEXT:    lbz r4, 304(r1)
-; CHECK-PWR7-NEXT:    stxvw4x v3, 0, r3
+; CHECK-PWR7-NEXT:    lbz r3, 304(r1)
+; CHECK-PWR7-NEXT:    stxvw4x v3, 0, r4
 ; CHECK-PWR7-NEXT:    lbz r5, 305(r1)
 ; CHECK-PWR7-NEXT:    lbz r6, 321(r1)
 ; CHECK-PWR7-NEXT:    lbz r7, 306(r1)
 ; CHECK-PWR7-NEXT:    lbz r8, 322(r1)
+; CHECK-PWR7-NEXT:    cmplw r5, r6
 ; CHECK-PWR7-NEXT:    lbz r9, 307(r1)
 ; CHECK-PWR7-NEXT:    lbz r10, 323(r1)
+; CHECK-PWR7-NEXT:    sub r25, r6, r5
+; CHECK-PWR7-NEXT:    sub r24, r5, r6
+; CHECK-PWR7-NEXT:    lbz r11, 308(r1)
+; CHECK-PWR7-NEXT:    lbz r12, 324(r1)
+; CHECK-PWR7-NEXT:    sub r6, r8, r7
 ; CHECK-PWR7-NEXT:    lbz r0, 309(r1)
 ; CHECK-PWR7-NEXT:    lbz r30, 325(r1)
+; CHECK-PWR7-NEXT:    iselgt r5, r24, r25
+; CHECK-PWR7-NEXT:    cmplw r7, r8
+; CHECK-PWR7-NEXT:    sub r7, r7, r8
+; CHECK-PWR7-NEXT:    sub r8, r10, r9
+; CHECK-PWR7-NEXT:    sub r25, r9, r10
 ; CHECK-PWR7-NEXT:    lbz r29, 310(r1)
 ; CHECK-PWR7-NEXT:    lbz r28, 326(r1)
-; CHECK-PWR7-NEXT:    lbz r11, 308(r1)
-; CHECK-PWR7-NEXT:    lbz r12, 324(r1)
 ; CHECK-PWR7-NEXT:    lbz r27, 311(r1)
 ; CHECK-PWR7-NEXT:    lbz r26, 327(r1)
-; CHECK-PWR7-NEXT:    lbz r25, 312(r1)
-; CHECK-PWR7-NEXT:    sub r5, r5, r6
-; CHECK-PWR7-NEXT:    sub r6, r7, r8
-; CHECK-PWR7-NEXT:    sub r7, r9, r10
-; CHECK-PWR7-NEXT:    sub r9, r0, r30
-; CHECK-PWR7-NEXT:    sub r10, r29, r28
-; CHECK-PWR7-NEXT:    sub r8, r11, r12
-; CHECK-PWR7-NEXT:    srawi r0, r5, 31
-; CHECK-PWR7-NEXT:    srawi r30, r6, 31
-; CHECK-PWR7-NEXT:    srawi r29, r7, 31
-; CHECK-PWR7-NEXT:    srawi r28, r8, 31
-; CHECK-PWR7-NEXT:    sub r11, r27, r26
-; CHECK-PWR7-NEXT:    srawi r27, r9, 31
-; CHECK-PWR7-NEXT:    lbz r24, 328(r1)
-; CHECK-PWR7-NEXT:    xor r5, r5, r0
-; CHECK-PWR7-NEXT:    xor r6, r6, r30
-; CHECK-PWR7-NEXT:    xor r7, r7, r29
-; CHECK-PWR7-NEXT:    xor r8, r8, r28
-; CHECK-PWR7-NEXT:    xor r9, r9, r27
-; CHECK-PWR7-NEXT:    srawi r26, r10, 31
-; CHECK-PWR7-NEXT:    sub r5, r5, r0
-; CHECK-PWR7-NEXT:    sub r6, r6, r30
-; CHECK-PWR7-NEXT:    lbz r0, 313(r1)
-; CHECK-PWR7-NEXT:    lbz r30, 329(r1)
-; CHECK-PWR7-NEXT:    sub r7, r7, r29
-; CHECK-PWR7-NEXT:    lbz r29, 330(r1)
-; CHECK-PWR7-NEXT:    sub r8, r8, r28
-; CHECK-PWR7-NEXT:    lbz r28, 331(r1)
-; CHECK-PWR7-NEXT:    sub r9, r9, r27
-; CHECK-PWR7-NEXT:    lbz r27, 332(r1)
-; CHECK-PWR7-NEXT:    xor r10, r10, r26
-; CHECK-PWR7-NEXT:    sub r10, r10, r26
-; CHECK-PWR7-NEXT:    lbz r26, 333(r1)
-; CHECK-PWR7-NEXT:    sub r12, r25, r24
-; CHECK-PWR7-NEXT:    srawi r25, r11, 31
-; CHECK-PWR7-NEXT:    lbz r3, 320(r1)
-; CHECK-PWR7-NEXT:    sub r0, r0, r30
-; CHECK-PWR7-NEXT:    xor r11, r11, r25
-; CHECK-PWR7-NEXT:    sub r11, r11, r25
-; CHECK-PWR7-NEXT:    lbz r25, 334(r1)
-; CHECK-PWR7-NEXT:    sub r4, r4, r3
-; CHECK-PWR7-NEXT:    srawi r30, r0, 31
-; CHECK-PWR7-NEXT:    srawi r24, r12, 31
-; CHECK-PWR7-NEXT:    xor r12, r12, r24
-; CHECK-PWR7-NEXT:    sub r12, r12, r24
-; CHECK-PWR7-NEXT:    lbz r24, 335(r1)
-; CHECK-PWR7-NEXT:    srawi r3, r4, 31
-; CHECK-PWR7-NEXT:    xor r4, r4, r3
-; CHECK-PWR7-NEXT:    xor r0, r0, r30
-; CHECK-PWR7-NEXT:    sub r3, r4, r3
-; CHECK-PWR7-NEXT:    stb r3, 48(r1)
-; CHECK-PWR7-NEXT:    addi r3, r1, 288
-; CHECK-PWR7-NEXT:    stb r12, 176(r1)
-; CHECK-PWR7-NEXT:    sub r0, r0, r30
-; CHECK-PWR7-NEXT:    lbz r30, 314(r1)
-; CHECK-PWR7-NEXT:    stb r11, 160(r1)
-; CHECK-PWR7-NEXT:    sub r30, r30, r29
-; CHECK-PWR7-NEXT:    stb r0, 192(r1)
-; CHECK-PWR7-NEXT:    stb r10, 144(r1)
-; CHECK-PWR7-NEXT:    stb r9, 128(r1)
+; CHECK-PWR7-NEXT:    lbz r4, 320(r1)
+; CHECK-PWR7-NEXT:    iselgt r6, r7, r6
+; CHECK-PWR7-NEXT:    cmplw r9, r10
+; CHECK-PWR7-NEXT:    sub r9, r12, r11
+; CHECK-PWR7-NEXT:    sub r10, r11, r12
+; CHECK-PWR7-NEXT:    iselgt r7, r25, r8
+; CHECK-PWR7-NEXT:    cmplw r11, r12
+; CHECK-PWR7-NEXT:    sub r11, r30, r0
+; CHECK-PWR7-NEXT:    sub r12, r0, r30
+; CHECK-PWR7-NEXT:    iselgt r8, r10, r9
+; CHECK-PWR7-NEXT:    sub r10, r28, r29
+; CHECK-PWR7-NEXT:    cmplw r0, r30
+; CHECK-PWR7-NEXT:    sub r30, r29, r28
+; CHECK-PWR7-NEXT:    lbz r0, 328(r1)
+; CHECK-PWR7-NEXT:    iselgt r9, r12, r11
+; CHECK-PWR7-NEXT:    sub r11, r26, r27
+; CHECK-PWR7-NEXT:    cmplw r29, r28
+; CHECK-PWR7-NEXT:    sub r12, r27, r26
 ; CHECK-PWR7-NEXT:    stb r8, 112(r1)
 ; CHECK-PWR7-NEXT:    stb r7, 96(r1)
 ; CHECK-PWR7-NEXT:    stb r6, 80(r1)
-; CHECK-PWR7-NEXT:    srawi r29, r30, 31
 ; CHECK-PWR7-NEXT:    stb r5, 64(r1)
-; CHECK-PWR7-NEXT:    xor r30, r30, r29
-; CHECK-PWR7-NEXT:    sub r30, r30, r29
+; CHECK-PWR7-NEXT:    stb r9, 128(r1)
+; CHECK-PWR7-NEXT:    iselgt r10, r30, r10
+; CHECK-PWR7-NEXT:    cmplw r27, r26
+; CHECK-PWR7-NEXT:    stb r10, 144(r1)
+; CHECK-PWR7-NEXT:    iselgt r11, r12, r11
+; CHECK-PWR7-NEXT:    lbz r12, 312(r1)
+; CHECK-PWR7-NEXT:    stb r11, 160(r1)
+; CHECK-PWR7-NEXT:    sub r30, r0, r12
+; CHECK-PWR7-NEXT:    cmplw r12, r0
+; CHECK-PWR7-NEXT:    sub r29, r12, r0
+; CHECK-PWR7-NEXT:    lbz r0, 313(r1)
+; CHECK-PWR7-NEXT:    iselgt r12, r29, r30
+; CHECK-PWR7-NEXT:    lbz r30, 329(r1)
+; CHECK-PWR7-NEXT:    stb r12, 176(r1)
+; CHECK-PWR7-NEXT:    cmplw r0, r30
+; CHECK-PWR7-NEXT:    sub r29, r30, r0
+; CHECK-PWR7-NEXT:    sub r28, r0, r30
+; CHECK-PWR7-NEXT:    lbz r30, 314(r1)
+; CHECK-PWR7-NEXT:    iselgt r0, r28, r29
+; CHECK-PWR7-NEXT:    lbz r29, 330(r1)
+; CHECK-PWR7-NEXT:    cmplw r30, r29
+; CHECK-PWR7-NEXT:    sub r28, r29, r30
+; CHECK-PWR7-NEXT:    sub r27, r30, r29
 ; CHECK-PWR7-NEXT:    lbz r29, 315(r1)
-; CHECK-PWR7-NEXT:    sub r29, r29, r28
+; CHECK-PWR7-NEXT:    stb r0, 192(r1)
+; CHECK-PWR7-NEXT:    iselgt r30, r27, r28
+; CHECK-PWR7-NEXT:    lbz r28, 331(r1)
 ; CHECK-PWR7-NEXT:    stb r30, 208(r1)
-; CHECK-PWR7-NEXT:    ld r30, 384(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    srawi r28, r29, 31
-; CHECK-PWR7-NEXT:    xor r29, r29, r28
-; CHECK-PWR7-NEXT:    sub r29, r29, r28
+; CHECK-PWR7-NEXT:    ld r30, 400(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    cmplw r29, r28
+; CHECK-PWR7-NEXT:    sub r27, r28, r29
+; CHECK-PWR7-NEXT:    sub r26, r29, r28
 ; CHECK-PWR7-NEXT:    lbz r28, 316(r1)
-; CHECK-PWR7-NEXT:    sub r28, r28, r27
+; CHECK-PWR7-NEXT:    iselgt r29, r26, r27
+; CHECK-PWR7-NEXT:    lbz r27, 332(r1)
 ; CHECK-PWR7-NEXT:    stb r29, 224(r1)
-; CHECK-PWR7-NEXT:    ld r29, 376(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    srawi r27, r28, 31
-; CHECK-PWR7-NEXT:    xor r28, r28, r27
+; CHECK-PWR7-NEXT:    ld r29, 392(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    sub r26, r27, r28
+; CHECK-PWR7-NEXT:    cmplw r28, r27
 ; CHECK-PWR7-NEXT:    sub r28, r28, r27
 ; CHECK-PWR7-NEXT:    lbz r27, 317(r1)
-; CHECK-PWR7-NEXT:    sub r27, r27, r26
+; CHECK-PWR7-NEXT:    iselgt r28, r28, r26
+; CHECK-PWR7-NEXT:    lbz r26, 333(r1)
 ; CHECK-PWR7-NEXT:    stb r28, 240(r1)
-; CHECK-PWR7-NEXT:    ld r28, 368(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    srawi r26, r27, 31
-; CHECK-PWR7-NEXT:    xor r27, r27, r26
-; CHECK-PWR7-NEXT:    sub r27, r27, r26
+; CHECK-PWR7-NEXT:    ld r28, 384(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    cmplw r27, r26
+; CHECK-PWR7-NEXT:    sub r25, r26, r27
+; CHECK-PWR7-NEXT:    sub r24, r27, r26
 ; CHECK-PWR7-NEXT:    lbz r26, 318(r1)
-; CHECK-PWR7-NEXT:    sub r26, r26, r25
-; CHECK-PWR7-NEXT:    stb r27, 256(r1)
-; CHECK-PWR7-NEXT:    ld r27, 360(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    srawi r25, r26, 31
-; CHECK-PWR7-NEXT:    xor r26, r26, r25
-; CHECK-PWR7-NEXT:    sub r26, r26, r25
+; CHECK-PWR7-NEXT:    iselgt r27, r24, r25
+; CHECK-PWR7-NEXT:    lbz r25, 334(r1)
+; CHECK-PWR7-NEXT:    sub r24, r25, r26
+; CHECK-PWR7-NEXT:    cmplw r26, r25
+; CHECK-PWR7-NEXT:    sub r23, r26, r25
 ; CHECK-PWR7-NEXT:    lbz r25, 319(r1)
+; CHECK-PWR7-NEXT:    iselgt r26, r23, r24
+; CHECK-PWR7-NEXT:    lbz r24, 335(r1)
+; CHECK-PWR7-NEXT:    stb r27, 256(r1)
+; CHECK-PWR7-NEXT:    ld r27, 376(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    cmplw r25, r24
+; CHECK-PWR7-NEXT:    sub r23, r24, r25
 ; CHECK-PWR7-NEXT:    sub r25, r25, r24
+; CHECK-PWR7-NEXT:    sub r24, r4, r3
 ; CHECK-PWR7-NEXT:    stb r26, 272(r1)
-; CHECK-PWR7-NEXT:    ld r26, 352(r1) # 8-byte Folded Reload
-; CHECK-PWR7-NEXT:    srawi r24, r25, 31
-; CHECK-PWR7-NEXT:    xor r25, r25, r24
-; CHECK-PWR7-NEXT:    sub r25, r25, r24
-; CHECK-PWR7-NEXT:    ld r24, 336(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    ld r26, 368(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    iselgt r25, r25, r23
+; CHECK-PWR7-NEXT:    cmplw r3, r4
+; CHECK-PWR7-NEXT:    sub r23, r3, r4
+; CHECK-PWR7-NEXT:    iselgt r3, r23, r24
 ; CHECK-PWR7-NEXT:    stb r25, 288(r1)
-; CHECK-PWR7-NEXT:    ld r25, 344(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    ld r25, 360(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    ld r24, 352(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    ld r23, 344(r1) # 8-byte Folded Reload
+; CHECK-PWR7-NEXT:    stb r3, 48(r1)
+; CHECK-PWR7-NEXT:    addi r3, r1, 288
 ; CHECK-PWR7-NEXT:    lxvw4x v2, 0, r3
 ; CHECK-PWR7-NEXT:    addi r3, r1, 272
 ; CHECK-PWR7-NEXT:    lxvw4x v3, 0, r3
@@ -1019,7 +977,7 @@ define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr
 ; CHECK-PWR7-NEXT:    vmrghh v3, v4, v3
 ; CHECK-PWR7-NEXT:    xxmrghw vs1, v3, v2
 ; CHECK-PWR7-NEXT:    xxmrghd v2, vs1, vs0
-; CHECK-PWR7-NEXT:    addi r1, r1, 400
+; CHECK-PWR7-NEXT:    addi r1, r1, 416
 ; CHECK-PWR7-NEXT:    blr
 entry:
   %vecext = extractelement <16 x i8> %a, i32 0
@@ -1208,13 +1166,20 @@ entry:
 }
 
 define <8 x i16> @sub_absv_vec_16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr {
-; CHECK-LABEL: sub_absv_vec_16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsubuhm v2, v2, v3
-; CHECK-NEXT:    xxlxor v3, v3, v3
-; CHECK-NEXT:    vsubuhm v3, v3, v2
-; CHECK-NEXT:    vmaxsh v2, v2, v3
-; CHECK-NEXT:    blr
+; CHECK-PWR9-LABEL: sub_absv_vec_16:
+; CHECK-PWR9:       # %bb.0: # %entry
+; CHECK-PWR9-NEXT:    vminsh v4, v2, v3
+; CHECK-PWR9-NEXT:    vmaxsh v2, v2, v3
+; CHECK-PWR9-NEXT:    vsubuhm v2, v2, v4
+; CHECK-PWR9-NEXT:    blr
+;
+; CHECK-PWR78-LABEL: sub_absv_vec_16:
+; CHECK-PWR78:       # %bb.0: # %entry
+; CHECK-PWR78-NEXT:    vsubuhm v2, v2, v3
+; CHECK-PWR78-NEXT:    xxlxor v3, v3, v3
+; CHECK-PWR78-NEXT:    vsubuhm v3, v3, v2
+; CHECK-PWR78-NEXT:    vmaxsh v2, v2, v3
+; CHECK-PWR78-NEXT:    blr
 entry:
   %sub = sub nsw <8 x i16> %a, %b
   %sub.i = sub <8 x i16> zeroinitializer, %sub
@@ -1223,13 +1188,20 @@ entry:
 }
 
 define <16 x i8> @sub_absv_vec_8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr {
-; CHECK-LABEL: sub_absv_vec_8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsububm v2, v2, v3
-; CHECK-NEXT:    xxlxor v3, v3, v3
-; CHECK-NEXT:    vsububm v3, v3, v2
-; CHECK-NEXT:    vmaxsb v2, v2, v3
-; CHECK-NEXT:    blr
+; CHECK-PWR9-LABEL: sub_absv_vec_8:
+; CHECK-PWR9:       # %bb.0: # %entry
+; CHECK-PWR9-NEXT:    vminsb v4, v2, v3
+; CHECK-PWR9-NEXT:    vmaxsb v2, v2, v3
+; CHECK-PWR9-NEXT:    vsububm v2, v2, v4
+; CHECK-PWR9-NEXT:    blr
+;
+; CHECK-PWR78-LABEL: sub_absv_vec_8:
+; CHECK-PWR78:       # %bb.0: # %entry
+; CHECK-PWR78-NEXT:    vsububm v2, v2, v3
+; CHECK-PWR78-NEXT:    xxlxor v3, v3, v3
+; CHECK-PWR78-NEXT:    vsububm v3, v3, v2
+; CHECK-PWR78-NEXT:    vmaxsb v2, v2, v3
+; CHECK-PWR78-NEXT:    blr
 entry:
   %sub = sub nsw <16 x i8> %a, %b
   %sub.i = sub <16 x i8> zeroinitializer, %sub
@@ -1240,18 +1212,16 @@ entry:
 define <4 x i32> @zext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr {
 ; CHECK-PWR9-LE-LABEL: zext_sub_absd32:
 ; CHECK-PWR9-LE:       # %bb.0:
-; CHECK-PWR9-LE-NEXT:    xxlxor v4, v4, v4
-; CHECK-PWR9-LE-NEXT:    vmrglh v2, v4, v2
-; CHECK-PWR9-LE-NEXT:    vmrglh v3, v4, v3
-; CHECK-PWR9-LE-NEXT:    vabsduw v2, v2, v3
+; CHECK-PWR9-LE-NEXT:    vabsduh v2, v2, v3
+; CHECK-PWR9-LE-NEXT:    xxlxor v3, v3, v3
+; CHECK-PWR9-LE-NEXT:    vmrglh v2, v3, v2
 ; CHECK-PWR9-LE-NEXT:    blr
 ;
 ; CHECK-PWR9-BE-LABEL: zext_sub_absd32:
 ; CHECK-PWR9-BE:       # %bb.0:
-; CHECK-PWR9-BE-NEXT:    xxlxor v4, v4, v4
-; CHECK-PWR9-BE-NEXT:    vmrghh v2, v4, v2
-; CHECK-PWR9-BE-NEXT:    vmrghh v3, v4, v3
-; CHECK-PWR9-BE-NEXT:    vabsduw v2, v2, v3
+; CHECK-PWR9-BE-NEXT:    vabsduh v2, v2, v3
+; CHECK-PWR9-BE-NEXT:    xxlxor v3, v3, v3
+; CHECK-PWR9-BE-NEXT:    vmrghh v2, v3, v2
 ; CHECK-PWR9-BE-NEXT:    blr
 ;
 ; CHECK-PWR8-LABEL: zext_sub_absd32:
@@ -1287,18 +1257,16 @@ define <4 x i32> @zext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr {
 define <8 x i16> @zext_sub_absd16(<8 x i8>, <8 x i8>) local_unnamed_addr {
 ; CHECK-PWR9-LE-LABEL: zext_sub_absd16:
 ; CHECK-PWR9-LE:       # %bb.0:
-; CHECK-PWR9-LE-NEXT:    xxlxor v4, v4, v4
-; CHECK-PWR9-LE-NEXT:    vmrglb v2, v4, v2
-; CHECK-PWR9-LE-NEXT:    vmrglb v3, v4, v3
-; CHECK-PWR9-LE-NEXT:    vabsduh v2, v2, v3
+; CHECK-PWR9-LE-NEXT:    vabsdub v2, v2, v3
+; CHECK-PWR9-LE-NEXT:    xxlxor v3, v3, v3
+; CHECK-PWR9-LE-NEXT:    vmrglb v2, v3, v2
 ; CHECK-PWR9-LE-NEXT:    blr
 ;
 ; CHECK-PWR9-BE-LABEL: zext_sub_absd16:
 ; CHECK-PWR9-BE:       # %bb.0:
-; CHECK-PWR9-BE-NEXT:    xxlxor v4, v4, v4
-; CHECK-PWR9-BE-NEXT:    vmrghb v2, v4, v2
-; CHECK-PWR9-BE-NEXT:    vmrghb v3, v4, v3
-; CHECK-PWR9-BE-NEXT:    vabsduh v2, v2, v3
+; CHECK-PWR9-BE-NEXT:    vabsdub v2, v2, v3
+; CHECK-PWR9-BE-NEXT:    xxlxor v3, v3, v3
+; CHECK-PWR9-BE-NEXT:    vmrghb v2, v3, v2
 ; CHECK-PWR9-BE-NEXT:    blr
 ;
 ; CHECK-PWR8-LABEL: zext_sub_absd16:
@@ -1335,8 +1303,8 @@ define <16 x i8> @zext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr {
 ; CHECK-PWR9-LABEL: zext_sub_absd8:
 ; CHECK-PWR9:       # %bb.0:
 ; CHECK-PWR9-NEXT:    xxspltib vs0, 15
-; CHECK-PWR9-NEXT:    xxland v2, v2, vs0
 ; CHECK-PWR9-NEXT:    xxland v3, v3, vs0
+; CHECK-PWR9-NEXT:    xxland v2, v2, vs0
 ; CHECK-PWR9-NEXT:    vabsdub v2, v2, v3
 ; CHECK-PWR9-NEXT:    blr
 ;
@@ -1361,24 +1329,20 @@ define <16 x i8> @zext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr {
 define <4 x i32> @sext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr {
 ; CHECK-PWR9-LE-LABEL: sext_sub_absd32:
 ; CHECK-PWR9-LE:       # %bb.0:
-; CHECK-PWR9-LE-NEXT:    vmrglh v2, v2, v2
-; CHECK-PWR9-LE-NEXT:    vmrglh v3, v3, v3
-; CHECK-PWR9-LE-NEXT:    vextsh2w v2, v2
-; CHECK-PWR9-LE-NEXT:    vextsh2w v3, v3
-; CHECK-PWR9-LE-NEXT:    xvnegsp v3, v3
-; CHECK-PWR9-LE-NEXT:    xvnegsp v2, v2
-; CHECK-PWR9-LE-NEXT:    vabsduw v2, v2, v3
+; CHECK-PWR9-LE-NEXT:    vminsh v4, v2, v3
+; CHECK-PWR9-LE-NEXT:    vmaxsh v2, v2, v3
+; CHECK-PWR9-LE-NEXT:    xxlxor v3, v3, v3
+; CHECK-PWR9-LE-NEXT:    vsubuhm v2, v2, v4
+; CHECK-PWR9-LE-NEXT:    vmrglh v2, v3, v2
 ; CHECK-PWR9-LE-NEXT:    blr
 ;
 ; CHECK-PWR9-BE-LABEL: sext_sub_absd32:
 ; CHECK-PWR9-BE:       # %bb.0:
-; CHECK-PWR9-BE-NEXT:    vmrghh v2, v2, v2
-; CHECK-PWR9-BE-NEXT:    vmrghh v3, v3, v3
-; CHECK-PWR9-BE-NEXT:    vextsh2w v2, v2
-; CHECK-PWR9-BE-NEXT:    vextsh2w v3, v3
-; CHECK-PWR9-BE-NEXT:    xvnegsp v3, v3
-; CHECK-PWR9-BE-NEXT:    xvnegsp v2, v2
-; CHECK-PWR9-BE-NEXT:    vabsduw v2, v2, v3
+; CHECK-PWR9-BE-NEXT:    vminsh v4, v2, v3
+; CHECK-PWR9-BE-NEXT:    vmaxsh v2, v2, v3
+; CHECK-PWR9-BE-NEXT:    xxlxor v3, v3, v3
+; CHECK-PWR9-BE-NEXT:    vsubuhm v2, v2, v4
+; CHECK-PWR9-BE-NEXT:    vmrghh v2, v3, v2
 ; CHECK-PWR9-BE-NEXT:    blr
 ;
 ; CHECK-PWR8-LABEL: sext_sub_absd32:
@@ -1423,32 +1387,20 @@ define <4 x i32> @sext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr {
 define <8 x i16> @sext_sub_absd16(<8 x i8>, <8 x i8>) local_unnamed_addr {
 ; CHECK-PWR9-LE-LABEL: sext_sub_absd16:
 ; CHECK-PWR9-LE:       # %bb.0:
-; CHECK-PWR9-LE-NEXT:    vmrglb v2, v2, v2
-; CHECK-PWR9-LE-NEXT:    vspltish v4, 8
-; CHECK-PWR9-LE-NEXT:    vmrglb v3, v3, v3
-; CHECK-PWR9-LE-NEXT:    vslh v2, v2, v4
-; CHECK-PWR9-LE-NEXT:    vslh v3, v3, v4
-; CHECK-PWR9-LE-NEXT:    vsrah v2, v2, v4
-; CHECK-PWR9-LE-NEXT:    vsrah v3, v3, v4
-; CHECK-PWR9-LE-NEXT:    vsubuhm v2, v2, v3
+; CHECK-PWR9-LE-NEXT:    vminsb v4, v2, v3
+; CHECK-PWR9-LE-NEXT:    vmaxsb v2, v2, v3
 ; CHECK-PWR9-LE-NEXT:    xxlxor v3, v3, v3
-; CHECK-PWR9-LE-NEXT:    vsubuhm v3, v3, v2
-; CHECK-PWR9-LE-NEXT:    vmaxsh v2, v2, v3
+; CHECK-PWR9-LE-NEXT:    vsububm v2, v2, v4
+; CHECK-PWR9-LE-NEXT:    vmrglb v2, v3, v2
 ; CHECK-PWR9-LE-NEXT:    blr
 ;
 ; CHECK-PWR9-BE-LABEL: sext_sub_absd16:
 ; CHECK-PWR9-BE:       # %bb.0:
-; CHECK-PWR9-BE-NEXT:    vmrghb v2, v2, v2
-; CHECK-PWR9-BE-NEXT:    vspltish v4, 8
-; CHECK-PWR9-BE-NEXT:    vmrghb v3, v3, v3
-; CHECK-PWR9-BE-NEXT:    vslh v2, v2, v4
-; CHECK-PWR9-BE-NEXT:    vslh v3, v3, v4
-; CHECK-PWR9-BE-NEXT:    vsrah v2, v2, v4
-; CHECK-PWR9-BE-NEXT:    vsrah v3, v3, v4
-; CHECK-PWR9-BE-NEXT:    vsubuhm v2, v2, v3
+; CHECK-PWR9-BE-NEXT:    vminsb v4, v2, v3
+; CHECK-PWR9-BE-NEXT:    vmaxsb v2, v2, v3
 ; CHECK-PWR9-BE-NEXT:    xxlxor v3, v3, v3
-; CHECK-PWR9-BE-NEXT:    vsubuhm v3, v3, v2
-; CHECK-PWR9-BE-NEXT:    vmaxsh v2, v2, v3
+; CHECK-PWR9-BE-NEXT:    vsububm v2, v2, v4
+; CHECK-PWR9-BE-NEXT:    vmrghb v2, v3, v2
 ; CHECK-PWR9-BE-NEXT:    blr
 ;
 ; CHECK-PWR8-LABEL: sext_sub_absd16:
@@ -1492,14 +1444,15 @@ define <16 x i8> @sext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr {
 ; CHECK-PWR9-LABEL: sext_sub_absd8:
 ; CHECK-PWR9:       # %bb.0:
 ; CHECK-PWR9-NEXT:    xxspltib v4, 4
-; CHECK-PWR9-NEXT:    vslb v2, v2, v4
+; CHECK-PWR9-NEXT:    xxspltib vs0, 15
 ; CHECK-PWR9-NEXT:    vslb v3, v3, v4
-; CHECK-PWR9-NEXT:    vsrab v2, v2, v4
+; CHECK-PWR9-NEXT:    vslb v2, v2, v4
 ; CHECK-PWR9-NEXT:    vsrab v3, v3, v4
-; CHECK-PWR9-NEXT:    vsububm v2, v2, v3
-; CHECK-PWR9-NEXT:    xxlxor v3, v3, v3
-; CHECK-PWR9-NEXT:    vsububm v3, v3, v2
+; CHECK-PWR9-NEXT:    vsrab v2, v2, v4
+; CHECK-PWR9-NEXT:    vminsb v4, v2, v3
 ; CHECK-PWR9-NEXT:    vmaxsb v2, v2, v3
+; CHECK-PWR9-NEXT:    vsububm v2, v2, v4
+; CHECK-PWR9-NEXT:    xxland v2, v2, vs0
 ; CHECK-PWR9-NEXT:    blr
 ;
 ; CHECK-PWR78-LABEL: sext_sub_absd8:
@@ -1532,10 +1485,9 @@ define <4 x i32> @absd_int32_ugt(<4 x i32>, <4 x i32>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int32_ugt:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtuw v4, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT:    vminuw v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxuw v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp ugt <4 x i32> %0, %1
   %4 = sub <4 x i32> %0, %1
@@ -1552,11 +1504,9 @@ define <4 x i32> @absd_int32_uge(<4 x i32>, <4 x i32>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int32_uge:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtuw v4, v3, v2
-; CHECK-PWR78-NEXT:    xxlnor vs0, v4, v4
-; CHECK-PWR78-NEXT:    vsubuwm v4, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v2, v4, vs0
+; CHECK-PWR78-NEXT:    vminuw v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxuw v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp uge <4 x i32> %0, %1
   %4 = sub <4 x i32> %0, %1
@@ -1573,10 +1523,9 @@ define <4 x i32> @absd_int32_ult(<4 x i32>, <4 x i32>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int32_ult:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtuw v4, v3, v2
-; CHECK-PWR78-NEXT:    vsubuwm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT:    vminuw v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxuw v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp ult <4 x i32> %0, %1
   %4 = sub <4 x i32> %0, %1
@@ -1593,11 +1542,9 @@ define <4 x i32> @absd_int32_ule(<4 x i32>, <4 x i32>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int32_ule:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtuw v4, v2, v3
-; CHECK-PWR78-NEXT:    xxlnor vs0, v4, v4
-; CHECK-PWR78-NEXT:    vsubuwm v4, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v4, v2, vs0
+; CHECK-PWR78-NEXT:    vminuw v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxuw v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp ule <4 x i32> %0, %1
   %4 = sub <4 x i32> %0, %1
@@ -1614,10 +1561,9 @@ define <8 x i16> @absd_int16_ugt(<8 x i16>, <8 x i16>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int16_ugt:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtuh v4, v2, v3
-; CHECK-PWR78-NEXT:    vsubuhm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsubuhm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT:    vminuh v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxuh v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuhm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp ugt <8 x i16> %0, %1
   %4 = sub <8 x i16> %0, %1
@@ -1634,11 +1580,9 @@ define <8 x i16> @absd_int16_uge(<8 x i16>, <8 x i16>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int16_uge:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtuh v4, v3, v2
-; CHECK-PWR78-NEXT:    vsubuhm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsubuhm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxlnor v4, v4, v4
-; CHECK-PWR78-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT:    vminuh v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxuh v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuhm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp uge <8 x i16> %0, %1
   %4 = sub <8 x i16> %0, %1
@@ -1655,10 +1599,9 @@ define <8 x i16> @absd_int16_ult(<8 x i16>, <8 x i16>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int16_ult:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtuh v4, v3, v2
-; CHECK-PWR78-NEXT:    vsubuhm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsubuhm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT:    vminuh v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxuh v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuhm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp ult <8 x i16> %0, %1
   %4 = sub <8 x i16> %0, %1
@@ -1675,11 +1618,9 @@ define <8 x i16> @absd_int16_ule(<8 x i16>, <8 x i16>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int16_ule:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtuh v4, v2, v3
-; CHECK-PWR78-NEXT:    vsubuhm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsubuhm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxlnor v4, v4, v4
-; CHECK-PWR78-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT:    vminuh v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxuh v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuhm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp ule <8 x i16> %0, %1
   %4 = sub <8 x i16> %0, %1
@@ -1696,10 +1637,9 @@ define <16 x i8> @absd_int8_ugt(<16 x i8>, <16 x i8>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int8_ugt:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtub v4, v2, v3
-; CHECK-PWR78-NEXT:    vsububm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsububm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT:    vminub v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxub v2, v2, v3
+; CHECK-PWR78-NEXT:    vsububm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp ugt <16 x i8> %0, %1
   %4 = sub <16 x i8> %0, %1
@@ -1716,11 +1656,9 @@ define <16 x i8> @absd_int8_uge(<16 x i8>, <16 x i8>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int8_uge:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtub v4, v3, v2
-; CHECK-PWR78-NEXT:    vsububm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsububm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxlnor v4, v4, v4
-; CHECK-PWR78-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT:    vminub v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxub v2, v2, v3
+; CHECK-PWR78-NEXT:    vsububm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp uge <16 x i8> %0, %1
   %4 = sub <16 x i8> %0, %1
@@ -1737,10 +1675,9 @@ define <16 x i8> @absd_int8_ult(<16 x i8>, <16 x i8>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int8_ult:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtub v4, v3, v2
-; CHECK-PWR78-NEXT:    vsububm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsububm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT:    vminub v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxub v2, v2, v3
+; CHECK-PWR78-NEXT:    vsububm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp ult <16 x i8> %0, %1
   %4 = sub <16 x i8> %0, %1
@@ -1757,11 +1694,9 @@ define <16 x i8> @absd_int8_ule(<16 x i8>, <16 x i8>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int8_ule:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtub v4, v2, v3
-; CHECK-PWR78-NEXT:    vsububm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsububm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxlnor v4, v4, v4
-; CHECK-PWR78-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT:    vminub v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxub v2, v2, v3
+; CHECK-PWR78-NEXT:    vsububm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp ule <16 x i8> %0, %1
   %4 = sub <16 x i8> %0, %1
@@ -1782,10 +1717,9 @@ define <4 x i32> @absd_int32_sgt(<4 x i32>, <4 x i32>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int32_sgt:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtsw v4, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-PWR78-NEXT:    vminsw v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp sgt <4 x i32> %0, %1
   %4 = sub <4 x i32> %0, %1
@@ -1804,11 +1738,9 @@ define <4 x i32> @absd_int32_sge(<4 x i32>, <4 x i32>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int32_sge:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtsw v4, v3, v2
-; CHECK-PWR78-NEXT:    xxlnor vs0, v4, v4
-; CHECK-PWR78-NEXT:    vsubuwm v4, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v2, v4, vs0
+; CHECK-PWR78-NEXT:    vminsw v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp sge <4 x i32> %0, %1
   %4 = sub <4 x i32> %0, %1
@@ -1827,10 +1759,9 @@ define <4 x i32> @absd_int32_slt(<4 x i32>, <4 x i32>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int32_slt:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtsw v4, v3, v2
-; CHECK-PWR78-NEXT:    vsubuwm v5, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-PWR78-NEXT:    vminsw v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp slt <4 x i32> %0, %1
   %4 = sub <4 x i32> %0, %1
@@ -1849,11 +1780,9 @@ define <4 x i32> @absd_int32_sle(<4 x i32>, <4 x i32>) {
 ;
 ; CHECK-PWR78-LABEL: absd_int32_sle:
 ; CHECK-PWR78:       # %bb.0:
-; CHECK-PWR78-NEXT:    vcmpgtsw v4, v2, v3
-; CHECK-PWR78-NEXT:    xxlnor vs0, v4, v4
-; CHECK-PWR78-NEXT:    vsubuwm v4, v2, v3
-; CHECK-PWR78-NEXT:    vsubuwm v2, v3, v2
-; CHECK-PWR78-NEXT:    xxsel v2, v4, v2, vs0
+; CHECK-PWR78-NEXT:    vminsw v4, v2, v3
+; CHECK-PWR78-NEXT:    vmaxsw v2, v2, v3
+; CHECK-PWR78-NEXT:    vsubuwm v2, v2, v4
 ; CHECK-PWR78-NEXT:    blr
   %3 = icmp sle <4 x i32> %0, %1
   %4 = sub <4 x i32> %0, %1
@@ -1865,10 +1794,9 @@ define <4 x i32> @absd_int32_sle(<4 x i32>, <4 x i32>) {
 define <8 x i16> @absd_int16_sgt(<8 x i16>, <8 x i16>) {
 ; CHECK-LABEL: absd_int16_sgt:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcmpgtsh v4, v2, v3
-; CHECK-NEXT:    vsubuhm v5, v2, v3
-; CHECK-NEXT:    vsubuhm v2, v3, v2
-; CHECK-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-NEXT:    vminsh v4, v2, v3
+; CHECK-NEXT:    vmaxsh v2, v2, v3
+; CHECK-NEXT:    vsubuhm v2, v2, v4
 ; CHECK-NEXT:    blr
   %3 = icmp sgt <8 x i16> %0, %1
   %4 = sub <8 x i16> %0, %1
@@ -1880,11 +1808,9 @@ define <8 x i16> @absd_int16_sgt(<8 x i16>, <8 x i16>) {
 define <8 x i16> @absd_int16_sge(<8 x i16>, <8 x i16>) {
 ; CHECK-LABEL: absd_int16_sge:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcmpgtsh v4, v3, v2
-; CHECK-NEXT:    vsubuhm v5, v2, v3
-; CHECK-NEXT:    vsubuhm v2, v3, v2
-; CHECK-NEXT:    xxlnor v4, v4, v4
-; CHECK-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-NEXT:    vminsh v4, v2, v3
+; CHECK-NEXT:    vmaxsh v2, v2, v3
+; CHECK-NEXT:    vsubuhm v2, v2, v4
 ; CHECK-NEXT:    blr
   %3 = icmp sge <8 x i16> %0, %1
   %4 = sub <8 x i16> %0, %1
@@ -1896,10 +1822,9 @@ define <8 x i16> @absd_int16_sge(<8 x i16>, <8 x i16>) {
 define <8 x i16> @absd_int16_slt(<8 x i16>, <8 x i16>) {
 ; CHECK-LABEL: absd_int16_slt:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcmpgtsh v4, v3, v2
-; CHECK-NEXT:    vsubuhm v5, v2, v3
-; CHECK-NEXT:    vsubuhm v2, v3, v2
-; CHECK-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-NEXT:    vminsh v4, v2, v3
+; CHECK-NEXT:    vmaxsh v2, v2, v3
+; CHECK-NEXT:    vsubuhm v2, v2, v4
 ; CHECK-NEXT:    blr
   %3 = icmp slt <8 x i16> %0, %1
   %4 = sub <8 x i16> %0, %1
@@ -1911,11 +1836,9 @@ define <8 x i16> @absd_int16_slt(<8 x i16>, <8 x i16>) {
 define <8 x i16> @absd_int16_sle(<8 x i16>, <8 x i16>) {
 ; CHECK-LABEL: absd_int16_sle:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcmpgtsh v4, v2, v3
-; CHECK-NEXT:    vsubuhm v5, v2, v3
-; CHECK-NEXT:    vsubuhm v2, v3, v2
-; CHECK-NEXT:    xxlnor v4, v4, v4
-; CHECK-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-NEXT:    vminsh v4, v2, v3
+; CHECK-NEXT:    vmaxsh v2, v2, v3
+; CHECK-NEXT:    vsubuhm v2, v2, v4
 ; CHECK-NEXT:    blr
   %3 = icmp sle <8 x i16> %0, %1
   %4 = sub <8 x i16> %0, %1
@@ -1927,10 +1850,9 @@ define <8 x i16> @absd_int16_sle(<8 x i16>, <8 x i16>) {
 define <16 x i8> @absd_int8_sgt(<16 x i8>, <16 x i8>) {
 ; CHECK-LABEL: absd_int8_sgt:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcmpgtsb v4, v2, v3
-; CHECK-NEXT:    vsububm v5, v2, v3
-; CHECK-NEXT:    vsububm v2, v3, v2
-; CHECK-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-NEXT:    vminsb v4, v2, v3
+; CHECK-NEXT:    vmaxsb v2, v2, v3
+; CHECK-NEXT:    vsububm v2, v2, v4
 ; CHECK-NEXT:    blr
   %3 = icmp sgt <16 x i8> %0, %1
   %4 = sub <16 x i8> %0, %1
@@ -1942,11 +1864,9 @@ define <16 x i8> @absd_int8_sgt(<16 x i8>, <16 x i8>) {
 define <16 x i8> @absd_int8_sge(<16 x i8>, <16 x i8>) {
 ; CHECK-LABEL: absd_int8_sge:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcmpgtsb v4, v3, v2
-; CHECK-NEXT:    vsububm v5, v2, v3
-; CHECK-NEXT:    vsububm v2, v3, v2
-; CHECK-NEXT:    xxlnor v4, v4, v4
-; CHECK-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-NEXT:    vminsb v4, v2, v3
+; CHECK-NEXT:    vmaxsb v2, v2, v3
+; CHECK-NEXT:    vsububm v2, v2, v4
 ; CHECK-NEXT:    blr
   %3 = icmp sge <16 x i8> %0, %1
   %4 = sub <16 x i8> %0, %1
@@ -1958,10 +1878,9 @@ define <16 x i8> @absd_int8_sge(<16 x i8>, <16 x i8>) {
 define <16 x i8> @absd_int8_slt(<16 x i8>, <16 x i8>) {
 ; CHECK-LABEL: absd_int8_slt:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcmpgtsb v4, v3, v2
-; CHECK-NEXT:    vsububm v5, v2, v3
-; CHECK-NEXT:    vsububm v2, v3, v2
-; CHECK-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-NEXT:    vminsb v4, v2, v3
+; CHECK-NEXT:    vmaxsb v2, v2, v3
+; CHECK-NEXT:    vsububm v2, v2, v4
 ; CHECK-NEXT:    blr
   %3 = icmp slt <16 x i8> %0, %1
   %4 = sub <16 x i8> %0, %1
@@ -1973,11 +1892,9 @@ define <16 x i8> @absd_int8_slt(<16 x i8>, <16 x i8>) {
 define <16 x i8> @absd_int8_sle(<16 x i8>, <16 x i8>) {
 ; CHECK-LABEL: absd_int8_sle:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vcmpgtsb v4, v2, v3
-; CHECK-NEXT:    vsububm v5, v2, v3
-; CHECK-NEXT:    vsububm v2, v3, v2
-; CHECK-NEXT:    xxlnor v4, v4, v4
-; CHECK-NEXT:    xxsel v2, v5, v2, v4
+; CHECK-NEXT:    vminsb v4, v2, v3
+; CHECK-NEXT:    vmaxsb v2, v2, v3
+; CHECK-NEXT:    vsububm v2, v2, v4
 ; CHECK-NEXT:    blr
   %3 = icmp sle <16 x i8> %0, %1
   %4 = sub <16 x i8> %0, %1
@@ -2006,53 +1923,51 @@ define <4 x i32> @absd_int32_ugt_opp(<4 x i32>, <4 x i32>) {
 define <2 x i64> @absd_int64_ugt(<2 x i64>, <2 x i64>) {
 ; CHECK-PWR9-LABEL: absd_int64_ugt:
 ; CHECK-PWR9:       # %bb.0:
-; CHECK-PWR9-NEXT:    vcmpgtud v4, v2, v3
-; CHECK-PWR9-NEXT:    vsubudm v5, v2, v3
-; CHECK-PWR9-NEXT:    vsubudm v2, v3, v2
-; CHECK-PWR9-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-PWR9-NEXT:    vminud v4, v2, v3
+; CHECK-PWR9-NEXT:    vmaxud v2, v2, v3
+; CHECK-PWR9-NEXT:    vsubudm v2, v2, v4
 ; CHECK-PWR9-NEXT:    blr
 ;
 ; CHECK-PWR8-LABEL: absd_int64_ugt:
 ; CHECK-PWR8:       # %bb.0:
-; CHECK-PWR8-NEXT:    vcmpgtud v4, v2, v3
-; CHECK-PWR8-NEXT:    vsubudm v5, v2, v3
-; CHECK-PWR8-NEXT:    vsubudm v2, v3, v2
-; CHECK-PWR8-NEXT:    xxsel v2, v2, v5, v4
+; CHECK-PWR8-NEXT:    vminud v4, v2, v3
+; CHECK-PWR8-NEXT:    vmaxud v2, v2, v3
+; CHECK-PWR8-NEXT:    vsubudm v2, v2, v4
 ; CHECK-PWR8-NEXT:    blr
 ;
 ; CHECK-PWR7-LABEL: absd_int64_ugt:
 ; CHECK-PWR7:       # %bb.0:
-; CHECK-PWR7-NEXT:    addi r3, r1, -64
-; CHECK-PWR7-NEXT:    addi r4, r1, -80
-; CHECK-PWR7-NEXT:    li r5, 0
-; CHECK-PWR7-NEXT:    li r6, -1
-; CHECK-PWR7-NEXT:    stxvd2x v3, 0, r3
-; CHECK-PWR7-NEXT:    stxvd2x v2, 0, r4
-; CHECK-PWR7-NEXT:    addi r9, r1, -16
-; CHECK-PWR7-NEXT:    ld r3, -56(r1)
-; CHECK-PWR7-NEXT:    ld r4, -72(r1)
-; CHECK-PWR7-NEXT:    ld r8, -80(r1)
-; CHECK-PWR7-NEXT:    cmpld r4, r3
-; CHECK-PWR7-NEXT:    iselgt r7, r6, r5
+; CHECK-PWR7-NEXT:    addi r3, r1, -80
+; CHECK-PWR7-NEXT:    addi r4, r1, -64
+; CHECK-PWR7-NEXT:    stxvd2x v2, 0, r3
+; CHECK-PWR7-NEXT:    stxvd2x v3, 0, r4
+; CHECK-PWR7-NEXT:    ld r3, -72(r1)
+; CHECK-PWR7-NEXT:    ld r4, -56(r1)
+; CHECK-PWR7-NEXT:    ld r6, -64(r1)
+; CHECK-PWR7-NEXT:    sub r5, r3, r4
+; CHECK-PWR7-NEXT:    cmpld r3, r4
+; CHECK-PWR7-NEXT:    std r5, -24(r1)
+; CHECK-PWR7-NEXT:    ld r5, -80(r1)
+; CHECK-PWR7-NEXT:    sub r7, r5, r6
+; CHECK-PWR7-NEXT:    std r7, -32(r1)
+; CHECK-PWR7-NEXT:    sub r7, r4, r3
+; CHECK-PWR7-NEXT:    li r3, 0
+; CHECK-PWR7-NEXT:    li r4, -1
+; CHECK-PWR7-NEXT:    std r7, -40(r1)
+; CHECK-PWR7-NEXT:    sub r7, r6, r5
+; CHECK-PWR7-NEXT:    std r7, -48(r1)
+; CHECK-PWR7-NEXT:    iselgt r7, r4, r3
+; CHECK-PWR7-NEXT:    cmpld r5, r6
 ; CHECK-PWR7-NEXT:    std r7, -8(r1)
-; CHECK-PWR7-NEXT:    ld r7, -64(r1)
-; CHECK-PWR7-NEXT:    cmpld r8, r7
-; CHECK-PWR7-NEXT:    iselgt r5, r6, r5
-; CHECK-PWR7-NEXT:    std r5, -16(r1)
-; CHECK-PWR7-NEXT:    sub r5, r4, r3
-; CHECK-PWR7-NEXT:    sub r3, r3, r4
-; CHECK-PWR7-NEXT:    lxvd2x v2, 0, r9
-; CHECK-PWR7-NEXT:    std r5, -40(r1)
-; CHECK-PWR7-NEXT:    sub r5, r8, r7
-; CHECK-PWR7-NEXT:    std r5, -48(r1)
-; CHECK-PWR7-NEXT:    addi r5, r1, -48
-; CHECK-PWR7-NEXT:    lxvd2x v3, 0, r5
-; CHECK-PWR7-NEXT:    std r3, -24(r1)
-; CHECK-PWR7-NEXT:    sub r3, r7, r8
-; CHECK-PWR7-NEXT:    std r3, -32(r1)
+; CHECK-PWR7-NEXT:    iselgt r3, r4, r3
+; CHECK-PWR7-NEXT:    std r3, -16(r1)
 ; CHECK-PWR7-NEXT:    addi r3, r1, -32
+; CHECK-PWR7-NEXT:    lxvd2x v2, 0, r3
+; CHECK-PWR7-NEXT:    addi r3, r1, -48
+; CHECK-PWR7-NEXT:    lxvd2x v3, 0, r3
+; CHECK-PWR7-NEXT:    addi r3, r1, -16
 ; CHECK-PWR7-NEXT:    lxvd2x v4, 0, r3
-; CHECK-PWR7-NEXT:    xxsel v2, v4, v3, v2
+; CHECK-PWR7-NEXT:    xxsel v2, v3, v2, v4
 ; CHECK-PWR7-NEXT:    blr
   %3 = icmp ugt <2 x i64> %0, %1
   %4 = sub <2 x i64> %0, %1
diff --git a/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll b/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll
index 787b81f7f2098..32c28148df32e 100644
--- a/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll
+++ b/llvm/test/CodeGen/PowerPC/vec-zext-abdu.ll
@@ -1,31 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -verify-machineinstrs -mtriple=powerpc64le -mcpu=pwr9 < %s | FileCheck %s
 
+; Widen the <12 x i8> operands to <16 x i8> so a single vabsdub suffices.
 define <12 x i8> @zext_abdu(<12 x i8> %a, <12 x i8> %b) {
 ; CHECK-LABEL: zext_abdu:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addis 3, 2, .LCPI0_0@toc@ha
-; CHECK-NEXT:    xxlxor 36, 36, 36
-; CHECK-NEXT:    addi 3, 3, .LCPI0_0@toc@l
-; CHECK-NEXT:    lxv 37, 0(3)
-; CHECK-NEXT:    addis 3, 2, .LCPI0_1@toc@ha
-; CHECK-NEXT:    addi 3, 3, .LCPI0_1@toc@l
-; CHECK-NEXT:    lxv 33, 0(3)
-; CHECK-NEXT:    addis 3, 2, .LCPI0_2@toc@ha
-; CHECK-NEXT:    vperm 0, 4, 2, 5
-; CHECK-NEXT:    vperm 5, 4, 3, 5
-; CHECK-NEXT:    addi 3, 3, .LCPI0_2@toc@l
-; CHECK-NEXT:    lxv 39, 0(3)
-; CHECK-NEXT:    vperm 6, 4, 2, 1
-; CHECK-NEXT:    vperm 1, 4, 3, 1
-; CHECK-NEXT:    vperm 2, 4, 2, 7
-; CHECK-NEXT:    vperm 3, 4, 3, 7
-; CHECK-NEXT:    vabsduw 4, 5, 0
-; CHECK-NEXT:    vabsduw 2, 3, 2
-; CHECK-NEXT:    vabsduw 3, 1, 6
-; CHECK-NEXT:    vpkuwum 3, 4, 3
-; CHECK-NEXT:    vpkuwum 2, 2, 2
-; CHECK-NEXT:    vpkuhum 2, 2, 3
+; CHECK-NEXT:    vabsdub 2, 2, 3
 ; CHECK-NEXT:    blr
 entry:
   %aa = zext <12 x i8> %a to <12 x i32>
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
index ddbfbd0b59fa4..b3a1121b61996 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -26,13 +26,9 @@ define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
 ; CHECK-LABEL: sabd_b_promoted_ops:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmerge.vim v12, v10, -1, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmerge.vim v8, v10, -1, v0
-; CHECK-NEXT:    vmin.vv v10, v12, v8
-; CHECK-NEXT:    vmax.vv v8, v12, v8
-; CHECK-NEXT:    vsub.vv v8, v8, v10
+; CHECK-NEXT:    vmxor.mm v0, v8, v0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    ret
   %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
   %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
@@ -158,13 +154,9 @@ define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
 ; CHECK-LABEL: uabd_b_promoted_ops:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmerge.vim v12, v10, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT:    vminu.vv v10, v12, v8
-; CHECK-NEXT:    vmaxu.vv v8, v12, v8
-; CHECK-NEXT:    vsub.vv v8, v8, v10
+; CHECK-NEXT:    vmxor.mm v0, v8, v0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    ret
   %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
   %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
diff --git a/llvm/test/CodeGen/X86/abds-vector-128.ll b/llvm/test/CodeGen/X86/abds-vector-128.ll
index 3143bf6190657..8d68161967a5e 100644
--- a/llvm/test/CodeGen/X86/abds-vector-128.ll
+++ b/llvm/test/CodeGen/X86/abds-vector-128.ll
@@ -198,33 +198,24 @@ define <4 x i32> @abd_ext_v4i32_undef(<4 x i32> %a, <4 x i32> %b) nounwind {
 define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-LABEL: abd_ext_v2i64:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT:    movq %xmm2, %rax
-; SSE2-NEXT:    movq %rax, %rcx
-; SSE2-NEXT:    sarq $63, %rcx
-; SSE2-NEXT:    movq %xmm0, %rdx
-; SSE2-NEXT:    movq %rdx, %rsi
-; SSE2-NEXT:    sarq $63, %rsi
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT:    movq %xmm0, %rdi
-; SSE2-NEXT:    movq %rdi, %r8
-; SSE2-NEXT:    sarq $63, %r8
-; SSE2-NEXT:    movq %xmm1, %r9
-; SSE2-NEXT:    movq %r9, %r10
-; SSE2-NEXT:    sarq $63, %r10
-; SSE2-NEXT:    subq %r9, %rdx
-; SSE2-NEXT:    sbbq %r10, %rsi
-; SSE2-NEXT:    subq %rdi, %rax
-; SSE2-NEXT:    sbbq %r8, %rcx
-; SSE2-NEXT:    sarq $63, %rcx
-; SSE2-NEXT:    xorq %rcx, %rax
-; SSE2-NEXT:    subq %rcx, %rax
-; SSE2-NEXT:    sarq $63, %rsi
-; SSE2-NEXT:    xorq %rsi, %rdx
-; SSE2-NEXT:    subq %rsi, %rdx
-; SSE2-NEXT:    movq %rdx, %xmm0
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pand %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psubq %xmm0, %xmm2
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: abd_ext_v2i64:
@@ -272,33 +263,24 @@ define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 define <2 x i64> @abd_ext_v2i64_undef(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-LABEL: abd_ext_v2i64_undef:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT:    movq %xmm2, %rax
-; SSE2-NEXT:    movq %rax, %rcx
-; SSE2-NEXT:    sarq $63, %rcx
-; SSE2-NEXT:    movq %xmm0, %rdx
-; SSE2-NEXT:    movq %rdx, %rsi
-; SSE2-NEXT:    sarq $63, %rsi
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT:    movq %xmm0, %rdi
-; SSE2-NEXT:    movq %rdi, %r8
-; SSE2-NEXT:    sarq $63, %r8
-; SSE2-NEXT:    movq %xmm1, %r9
-; SSE2-NEXT:    movq %r9, %r10
-; SSE2-NEXT:    sarq $63, %r10
-; SSE2-NEXT:    subq %r9, %rdx
-; SSE2-NEXT:    sbbq %r10, %rsi
-; SSE2-NEXT:    subq %rdi, %rax
-; SSE2-NEXT:    sbbq %r8, %rcx
-; SSE2-NEXT:    sarq $63, %rcx
-; SSE2-NEXT:    xorq %rcx, %rax
-; SSE2-NEXT:    subq %rcx, %rax
-; SSE2-NEXT:    sarq $63, %rsi
-; SSE2-NEXT:    xorq %rsi, %rdx
-; SSE2-NEXT:    subq %rsi, %rdx
-; SSE2-NEXT:    movq %rdx, %xmm0
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pand %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psubq %xmm0, %xmm2
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: abd_ext_v2i64_undef:
@@ -449,15 +431,12 @@ define <2 x i64> @abd_minmax_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-NEXT:    pand %xmm5, %xmm2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
 ; SSE2-NEXT:    por %xmm2, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm2
-; SSE2-NEXT:    pandn %xmm0, %xmm2
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm1, %xmm4
-; SSE2-NEXT:    pand %xmm3, %xmm1
-; SSE2-NEXT:    por %xmm2, %xmm1
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psubq %xmm0, %xmm2
 ; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: abd_minmax_v2i64:
@@ -598,24 +577,23 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-LABEL: abd_cmp_v2i64:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
-; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
-; SSE2-NEXT:    pxor %xmm1, %xmm2
+; SSE2-NEXT:    pxor %xmm0, %xmm2
 ; SSE2-NEXT:    movdqa %xmm2, %xmm4
 ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; SSE2-NEXT:    pand %xmm5, %xmm3
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT:    por %xmm3, %xmm2
-; SSE2-NEXT:    movdqa %xmm0, %xmm3
-; SSE2-NEXT:    psubq %xmm1, %xmm3
-; SSE2-NEXT:    psubq %xmm0, %xmm1
-; SSE2-NEXT:    pand %xmm2, %xmm1
-; SSE2-NEXT:    pandn %xmm3, %xmm2
-; SSE2-NEXT:    por %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pand %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psubq %xmm0, %xmm2
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: abd_cmp_v2i64:
@@ -790,26 +768,32 @@ define <2 x i64> @abd_subnsw_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 define <2 x i64> @abd_cmp_v2i64_multiuse_cmp(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-LABEL: abd_cmp_v2i64_multiuse_cmp:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    psubq %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648]
 ; SSE2-NEXT:    movdqa %xmm1, %xmm3
-; SSE2-NEXT:    psubq %xmm0, %xmm3
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
-; SSE2-NEXT:    pxor %xmm4, %xmm0
-; SSE2-NEXT:    pxor %xmm4, %xmm1
-; SSE2-NEXT:    movdqa %xmm1, %xmm4
-; SSE2-NEXT:    pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT:    pxor %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE2-NEXT:    pand %xmm5, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
-; SSE2-NEXT:    por %xmm0, %xmm1
-; SSE2-NEXT:    pand %xmm1, %xmm3
-; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
-; SSE2-NEXT:    pxor %xmm1, %xmm0
-; SSE2-NEXT:    pandn %xmm2, %xmm1
-; SSE2-NEXT:    por %xmm3, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    psubq %xmm0, %xmm5
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm4, %xmm0
+; SSE2-NEXT:    pandn %xmm5, %xmm4
+; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,0,2,2]
+; SSE2-NEXT:    pand %xmm6, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE2-NEXT:    pxor %xmm2, %xmm1
 ; SSE2-NEXT:    paddq %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/abds.ll b/llvm/test/CodeGen/X86/abds.ll
index a80339427984a..f8245ba8953d3 100644
--- a/llvm/test/CodeGen/X86/abds.ll
+++ b/llvm/test/CodeGen/X86/abds.ll
@@ -282,27 +282,24 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
 define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
 ; X86-LABEL: abd_ext_i64:
 ; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %edi
-; X86-NEXT:    sarl $31, %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    sbbl %esi, %edx
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    sbbl %edi, %esi
-; X86-NEXT:    sbbl %edi, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    xorl %ecx, %edx
-; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    subl %eax, %edi
+; X86-NEXT:    movl %esi, %ebx
+; X86-NEXT:    sbbl %edx, %ebx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    cmovll %edi, %eax
+; X86-NEXT:    cmovll %ebx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i64:
@@ -324,27 +321,24 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
 define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
 ; X86-LABEL: abd_ext_i64_undef:
 ; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %edi
-; X86-NEXT:    sarl $31, %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %edx, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    sbbl %esi, %edx
-; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:    sbbl %edi, %esi
-; X86-NEXT:    sbbl %edi, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    xorl %ecx, %edx
-; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    subl %eax, %edi
+; X86-NEXT:    movl %esi, %ebx
+; X86-NEXT:    sbbl %edx, %ebx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    cmovll %edi, %eax
+; X86-NEXT:    cmovll %ebx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i64_undef:
@@ -454,7 +448,6 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
 define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
 ; X86-LABEL: abd_minmax_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
@@ -462,24 +455,17 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    cmpl %eax, %ecx
-; X86-NEXT:    movl %esi, %edi
-; X86-NEXT:    sbbl %edx, %edi
-; X86-NEXT:    movl %edx, %edi
-; X86-NEXT:    cmovll %esi, %edi
-; X86-NEXT:    movl %eax, %ebx
-; X86-NEXT:    cmovll %ecx, %ebx
-; X86-NEXT:    cmpl %ecx, %eax
-; X86-NEXT:    movl %edx, %ebp
-; X86-NEXT:    sbbl %esi, %ebp
-; X86-NEXT:    cmovll %esi, %edx
-; X86-NEXT:    cmovll %ecx, %eax
-; X86-NEXT:    subl %ebx, %eax
-; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    subl %eax, %edi
+; X86-NEXT:    movl %esi, %ebx
+; X86-NEXT:    sbbl %edx, %ebx
+; X86-NEXT:    subl %ecx, %eax
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    cmovll %edi, %eax
+; X86-NEXT:    cmovll %ebx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
-; X86-NEXT:    popl %ebp
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_minmax_i64:
diff --git a/llvm/test/CodeGen/X86/abdu-vector-128.ll b/llvm/test/CodeGen/X86/abdu-vector-128.ll
index 0c33e8973c2d2..8ebafecdcc346 100644
--- a/llvm/test/CodeGen/X86/abdu-vector-128.ll
+++ b/llvm/test/CodeGen/X86/abdu-vector-128.ll
@@ -196,27 +196,24 @@ define <4 x i32> @abd_ext_v4i32_undef(<4 x i32> %a, <4 x i32> %b) nounwind {
 define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-LABEL: abd_ext_v2i64:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT:    movq %xmm2, %rax
-; SSE2-NEXT:    movq %xmm0, %rcx
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT:    movq %xmm0, %rdx
-; SSE2-NEXT:    movq %xmm1, %rsi
-; SSE2-NEXT:    xorl %edi, %edi
-; SSE2-NEXT:    subq %rsi, %rcx
-; SSE2-NEXT:    movl $0, %esi
-; SSE2-NEXT:    sbbq %rsi, %rsi
-; SSE2-NEXT:    subq %rdx, %rax
-; SSE2-NEXT:    sbbq %rdi, %rdi
-; SSE2-NEXT:    sarq $63, %rdi
-; SSE2-NEXT:    xorq %rdi, %rax
-; SSE2-NEXT:    subq %rdi, %rax
-; SSE2-NEXT:    sarq $63, %rsi
-; SSE2-NEXT:    xorq %rsi, %rcx
-; SSE2-NEXT:    subq %rsi, %rcx
-; SSE2-NEXT:    movq %rcx, %xmm0
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pand %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psubq %xmm0, %xmm2
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: abd_ext_v2i64:
@@ -274,27 +271,24 @@ define <2 x i64> @abd_ext_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 define <2 x i64> @abd_ext_v2i64_undef(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-LABEL: abd_ext_v2i64_undef:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT:    movq %xmm2, %rax
-; SSE2-NEXT:    movq %xmm0, %rcx
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT:    movq %xmm0, %rdx
-; SSE2-NEXT:    movq %xmm1, %rsi
-; SSE2-NEXT:    xorl %edi, %edi
-; SSE2-NEXT:    subq %rsi, %rcx
-; SSE2-NEXT:    movl $0, %esi
-; SSE2-NEXT:    sbbq %rsi, %rsi
-; SSE2-NEXT:    subq %rdx, %rax
-; SSE2-NEXT:    sbbq %rdi, %rdi
-; SSE2-NEXT:    sarq $63, %rdi
-; SSE2-NEXT:    xorq %rdi, %rax
-; SSE2-NEXT:    subq %rdi, %rax
-; SSE2-NEXT:    sarq $63, %rsi
-; SSE2-NEXT:    xorq %rsi, %rcx
-; SSE2-NEXT:    subq %rsi, %rcx
-; SSE2-NEXT:    movq %rcx, %xmm0
-; SSE2-NEXT:    movq %rax, %xmm1
-; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pand %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psubq %xmm0, %xmm2
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: abd_ext_v2i64_undef:
@@ -454,15 +448,12 @@ define <2 x i64> @abd_minmax_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-NEXT:    pand %xmm5, %xmm2
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
 ; SSE2-NEXT:    por %xmm2, %xmm3
-; SSE2-NEXT:    movdqa %xmm3, %xmm2
-; SSE2-NEXT:    pandn %xmm0, %xmm2
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm1, %xmm4
-; SSE2-NEXT:    pand %xmm3, %xmm1
-; SSE2-NEXT:    por %xmm2, %xmm1
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psubq %xmm0, %xmm2
 ; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: abd_minmax_v2i64:
@@ -612,24 +603,23 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-LABEL: abd_cmp_v2i64:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
 ; SSE2-NEXT:    pxor %xmm2, %xmm3
-; SSE2-NEXT:    pxor %xmm1, %xmm2
+; SSE2-NEXT:    pxor %xmm0, %xmm2
 ; SSE2-NEXT:    movdqa %xmm2, %xmm4
 ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
 ; SSE2-NEXT:    pcmpeqd %xmm3, %xmm2
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; SSE2-NEXT:    pand %xmm5, %xmm3
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[1,1,3,3]
-; SSE2-NEXT:    por %xmm3, %xmm2
-; SSE2-NEXT:    movdqa %xmm0, %xmm3
-; SSE2-NEXT:    psubq %xmm1, %xmm3
-; SSE2-NEXT:    psubq %xmm0, %xmm1
-; SSE2-NEXT:    pand %xmm2, %xmm1
-; SSE2-NEXT:    pandn %xmm3, %xmm2
-; SSE2-NEXT:    por %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pand %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psubq %xmm0, %xmm2
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    por %xmm3, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: abd_cmp_v2i64:
@@ -690,27 +680,26 @@ define <2 x i64> @abd_cmp_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
 define <2 x i64> @abd_cmp_v2i64_multiuse_cmp(<2 x i64> %a, <2 x i64> %b) nounwind {
 ; SSE2-LABEL: abd_cmp_v2i64_multiuse_cmp:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    psubq %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm1, %xmm3
-; SSE2-NEXT:    psubq %xmm0, %xmm3
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
-; SSE2-NEXT:    pxor %xmm4, %xmm1
-; SSE2-NEXT:    pxor %xmm4, %xmm0
-; SSE2-NEXT:    movdqa %xmm0, %xmm4
-; SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
-; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
-; SSE2-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT:    pand %xmm5, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
-; SSE2-NEXT:    por %xmm0, %xmm1
-; SSE2-NEXT:    movdqa %xmm1, %xmm0
-; SSE2-NEXT:    pandn %xmm3, %xmm0
-; SSE2-NEXT:    pand %xmm1, %xmm2
-; SSE2-NEXT:    por %xmm0, %xmm2
-; SSE2-NEXT:    paddq %xmm1, %xmm2
-; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psubq %xmm0, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pxor %xmm3, %xmm4
+; SSE2-NEXT:    pxor %xmm0, %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm5
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[1,1,3,3]
+; SSE2-NEXT:    por %xmm3, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm3
+; SSE2-NEXT:    pandn %xmm2, %xmm3
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm4, %xmm0
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    paddq %xmm4, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: abd_cmp_v2i64_multiuse_cmp:
diff --git a/llvm/test/CodeGen/X86/abdu.ll b/llvm/test/CodeGen/X86/abdu.ll
index 11719be4ab5cd..231b69fd7be18 100644
--- a/llvm/test/CodeGen/X86/abdu.ll
+++ b/llvm/test/CodeGen/X86/abdu.ll
@@ -280,21 +280,24 @@ define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
 define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
 ; X86-LABEL: abd_ext_i64:
 ; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl $0, %esi
-; X86-NEXT:    sbbl %esi, %esi
-; X86-NEXT:    sbbl %ecx, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    xorl %ecx, %edx
-; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    subl %eax, %edi
+; X86-NEXT:    movl %esi, %ebx
+; X86-NEXT:    sbbl %edx, %ebx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    cmovbl %edi, %eax
+; X86-NEXT:    cmovbl %ebx, %edx
 ; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i64:
@@ -316,21 +319,24 @@ define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
 define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
 ; X86-LABEL: abd_ext_i64_undef:
 ; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    subl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl $0, %esi
-; X86-NEXT:    sbbl %esi, %esi
-; X86-NEXT:    sbbl %ecx, %ecx
-; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    xorl %ecx, %edx
-; X86-NEXT:    xorl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    subl %eax, %edi
+; X86-NEXT:    movl %esi, %ebx
+; X86-NEXT:    sbbl %edx, %ebx
 ; X86-NEXT:    subl %ecx, %eax
-; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    cmovbl %edi, %eax
+; X86-NEXT:    cmovbl %ebx, %edx
 ; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_ext_i64_undef:
@@ -440,7 +446,6 @@ define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
 define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
 ; X86-LABEL: abd_minmax_i64:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebp
 ; X86-NEXT:    pushl %ebx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl %esi
@@ -448,24 +453,17 @@ define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    cmpl %eax, %ecx
-; X86-NEXT:    movl %esi, %edi
-; X86-NEXT:    sbbl %edx, %edi
-; X86-NEXT:    movl %edx, %edi
-; X86-NEXT:    cmovbl %esi, %edi
-; X86-NEXT:    movl %eax, %ebx
-; X86-NEXT:    cmovbl %ecx, %ebx
-; X86-NEXT:    cmpl %ecx, %eax
-; X86-NEXT:    movl %edx, %ebp
-; X86-NEXT:    sbbl %esi, %ebp
-; X86-NEXT:    cmovbl %esi, %edx
-; X86-NEXT:    cmovbl %ecx, %eax
-; X86-NEXT:    subl %ebx, %eax
-; X86-NEXT:    sbbl %edi, %edx
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    subl %eax, %edi
+; X86-NEXT:    movl %esi, %ebx
+; X86-NEXT:    sbbl %edx, %ebx
+; X86-NEXT:    subl %ecx, %eax
+; X86-NEXT:    sbbl %esi, %edx
+; X86-NEXT:    cmovbl %edi, %eax
+; X86-NEXT:    cmovbl %ebx, %edx
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
-; X86-NEXT:    popl %ebp
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: abd_minmax_i64:
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
index c6e8b75325050..a3183fc9ac0a6 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
@@ -857,15 +857,12 @@ define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwi
 ; SSE2-NEXT:    por %xmm2, %xmm3
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1]
 ; SSE2-NEXT:    por %xmm3, %xmm2
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm0, %xmm4
-; SSE2-NEXT:    movdqa %xmm3, %xmm5
-; SSE2-NEXT:    pandn %xmm1, %xmm5
-; SSE2-NEXT:    pand %xmm3, %xmm1
-; SSE2-NEXT:    por %xmm4, %xmm1
-; SSE2-NEXT:    pand %xmm0, %xmm3
-; SSE2-NEXT:    por %xmm5, %xmm3
-; SSE2-NEXT:    psubq %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psubq %xmm1, %xmm4
+; SSE2-NEXT:    psubq %xmm0, %xmm1
+; SSE2-NEXT:    pand %xmm3, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm3
+; SSE2-NEXT:    por %xmm4, %xmm3
 ; SSE2-NEXT:    movdqa %xmm3, %xmm1
 ; SSE2-NEXT:    psrlq $1, %xmm1
 ; SSE2-NEXT:    psrlq $33, %xmm3
@@ -1040,15 +1037,12 @@ define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) noun
 ; SSE2-NEXT:    por %xmm2, %xmm3
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1]
 ; SSE2-NEXT:    por %xmm3, %xmm2
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm0, %xmm4
-; SSE2-NEXT:    movdqa %xmm3, %xmm5
-; SSE2-NEXT:    pandn %xmm1, %xmm5
-; SSE2-NEXT:    pand %xmm3, %xmm1
-; SSE2-NEXT:    por %xmm4, %xmm1
-; SSE2-NEXT:    pand %xmm0, %xmm3
-; SSE2-NEXT:    por %xmm5, %xmm3
-; SSE2-NEXT:    psubq %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psubq %xmm1, %xmm4
+; SSE2-NEXT:    psubq %xmm0, %xmm1
+; SSE2-NEXT:    pand %xmm3, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm3
+; SSE2-NEXT:    por %xmm4, %xmm3
 ; SSE2-NEXT:    movdqa %xmm3, %xmm1
 ; SSE2-NEXT:    psrlq $1, %xmm1
 ; SSE2-NEXT:    psrlq $33, %xmm3
@@ -1252,15 +1246,12 @@ define <2 x i64> @vec128_i64_signed_mem_reg(ptr %a1_addr, <2 x i64> %a2) nounwin
 ; SSE2-NEXT:    por %xmm2, %xmm3
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1]
 ; SSE2-NEXT:    por %xmm3, %xmm2
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm1, %xmm4
-; SSE2-NEXT:    movdqa %xmm3, %xmm5
-; SSE2-NEXT:    pandn %xmm0, %xmm5
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
-; SSE2-NEXT:    pand %xmm1, %xmm3
-; SSE2-NEXT:    por %xmm5, %xmm3
-; SSE2-NEXT:    psubq %xmm0, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    psubq %xmm0, %xmm4
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm3
+; SSE2-NEXT:    por %xmm4, %xmm3
 ; SSE2-NEXT:    movdqa %xmm3, %xmm0
 ; SSE2-NEXT:    psrlq $1, %xmm0
 ; SSE2-NEXT:    psrlq $33, %xmm3
@@ -1442,15 +1433,12 @@ define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, ptr %a2_addr) nounwin
 ; SSE2-NEXT:    por %xmm2, %xmm3
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1]
 ; SSE2-NEXT:    por %xmm3, %xmm2
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm0, %xmm4
-; SSE2-NEXT:    movdqa %xmm3, %xmm5
-; SSE2-NEXT:    pandn %xmm1, %xmm5
-; SSE2-NEXT:    pand %xmm3, %xmm1
-; SSE2-NEXT:    por %xmm4, %xmm1
-; SSE2-NEXT:    pand %xmm0, %xmm3
-; SSE2-NEXT:    por %xmm5, %xmm3
-; SSE2-NEXT:    psubq %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psubq %xmm1, %xmm4
+; SSE2-NEXT:    psubq %xmm0, %xmm1
+; SSE2-NEXT:    pand %xmm3, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm3
+; SSE2-NEXT:    por %xmm4, %xmm3
 ; SSE2-NEXT:    movdqa %xmm3, %xmm1
 ; SSE2-NEXT:    psrlq $1, %xmm1
 ; SSE2-NEXT:    psrlq $33, %xmm3
@@ -1633,15 +1621,12 @@ define <2 x i64> @vec128_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; SSE2-NEXT:    por %xmm2, %xmm3
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1]
 ; SSE2-NEXT:    por %xmm3, %xmm2
-; SSE2-NEXT:    movdqa %xmm3, %xmm4
-; SSE2-NEXT:    pandn %xmm1, %xmm4
-; SSE2-NEXT:    movdqa %xmm3, %xmm5
-; SSE2-NEXT:    pandn %xmm0, %xmm5
-; SSE2-NEXT:    pand %xmm3, %xmm0
-; SSE2-NEXT:    por %xmm4, %xmm0
-; SSE2-NEXT:    pand %xmm1, %xmm3
-; SSE2-NEXT:    por %xmm5, %xmm3
-; SSE2-NEXT:    psubq %xmm0, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    psubq %xmm0, %xmm4
+; SSE2-NEXT:    psubq %xmm1, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm3
+; SSE2-NEXT:    por %xmm4, %xmm3
 ; SSE2-NEXT:    movdqa %xmm3, %xmm0
 ; SSE2-NEXT:    psrlq $1, %xmm0
 ; SSE2-NEXT:    psrlq $33, %xmm3

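For reviewers skimming the check lines: the recurring replacement in the
updated tests is the min/max form of the expansion, i.e.
abdu(a,b) = umax(a,b) - umin(a,b) and abds(a,b) = smax(a,b) - smin(a,b).
A minimal scalar C++ sketch of that identity (illustrative only; this is
not the SelectionDAG code, and the function names are invented):

#include <algorithm>
#include <cstdint>

// Unsigned absolute difference: corresponds to the vminuw/vmaxuw/vsubuwm
// sequences on PPC and vminu.vv/vmaxu.vv/vsub.vv on RVV in the diffs above.
uint32_t abdu32(uint32_t A, uint32_t B) {
  return std::max(A, B) - std::min(A, B);
}

// Signed absolute difference: min/max pick the operand order, and the
// subtract is performed with wrapping (unsigned) arithmetic, matching the
// modulo vsub*/psubq instructions used even in the signed tests. For
// example, abds32(INT32_MIN, INT32_MAX) yields the bit pattern 0xFFFFFFFF,
// exactly what a vminsw/vmaxsw/vsubuwm sequence produces.
int32_t abds32(int32_t A, int32_t B) {
  return (int32_t)((uint32_t)std::max(A, B) - (uint32_t)std::min(A, B));
}

This is also why the unsigned subtracts appear in the signed PPC tests
(vminsh/vmaxsh followed by vsubuhm): the signed min/max order the operands,
and the wrapping subtract supplies the magnitude.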

